code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
from argparse import ArgumentParser from .env import EnvironmentCommand def __lowercase ( ): a__ = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' ) a__ = parser.add_subparsers(help='diffusers-cli command helpers' ) # Register commands EnvironmentCommand.register_subcommand(__lowerCAmelCase ) # Let's go a__ = parser.parse_args() if not hasattr(__lowerCAmelCase , 'func' ): parser.print_help() exit(1 ) # Run a__ = args.func(__lowerCAmelCase ) service.run() if __name__ == "__main__": main()
240
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer snake_case : Dict = logging.get_logger(__name__) snake_case : Dict = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} snake_case : List[Any] = { '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } 
snake_case : int = { '''distilbert-base-uncased''': 5_12, '''distilbert-base-uncased-distilled-squad''': 5_12, '''distilbert-base-cased''': 5_12, '''distilbert-base-cased-distilled-squad''': 5_12, '''distilbert-base-german-cased''': 5_12, '''distilbert-base-multilingual-cased''': 5_12, } snake_case : Union[str, Any] = { '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class snake_case_ (lowerCamelCase_ ): UpperCAmelCase__ : Any = VOCAB_FILES_NAMES UpperCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__ : Tuple = PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__ : Optional[Any] = ['''input_ids''', '''attention_mask'''] UpperCAmelCase__ : Optional[int] = DistilBertTokenizer def __init__( self :Dict ,__snake_case :Dict=None ,__snake_case :Optional[Any]=None ,__snake_case :Optional[Any]=True ,__snake_case :List[Any]="[UNK]" ,__snake_case :str="[SEP]" ,__snake_case :List[Any]="[PAD]" ,__snake_case :Tuple="[CLS]" ,__snake_case :Optional[int]="[MASK]" ,__snake_case :Dict=True ,__snake_case :Dict=None ,**__snake_case :List[Any] ,) -> Optional[int]: super().__init__( __snake_case ,tokenizer_file=__snake_case ,do_lower_case=__snake_case ,unk_token=__snake_case ,sep_token=__snake_case ,pad_token=__snake_case ,cls_token=__snake_case ,mask_token=__snake_case ,tokenize_chinese_chars=__snake_case ,strip_accents=__snake_case ,**__snake_case ,) a__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' ,__snake_case ) != do_lower_case or normalizer_state.get('strip_accents' ,__snake_case ) != strip_accents or 
normalizer_state.get('handle_chinese_chars' ,__snake_case ) != tokenize_chinese_chars ): a__ = getattr(__snake_case ,normalizer_state.pop('type' ) ) a__ = do_lower_case a__ = strip_accents a__ = tokenize_chinese_chars a__ = normalizer_class(**__snake_case ) a__ = do_lower_case def lowerCamelCase__( self :Any ,__snake_case :List[str] ,__snake_case :int=None ) -> Dict: a__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase__( self :List[str] ,__snake_case :List[int] ,__snake_case :Optional[List[int]] = None ) -> List[int]: a__ = [self.sep_token_id] a__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__( self :Union[str, Any] ,__snake_case :str ,__snake_case :Optional[str] = None ) -> Tuple[str]: a__ = self._tokenizer.model.save(__snake_case ,name=__snake_case ) return tuple(__snake_case )
240
1
import cva import numpy as np class UpperCAmelCase__ : """simple docstring""" def __init__( self : str , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[Any]: if k in (0.04, 0.06): SCREAMING_SNAKE_CASE__ = k SCREAMING_SNAKE_CASE__ = window_size else: raise ValueError('''invalid k value''' ) def __str__( self : str ) -> str: return str(self.k ) def lowercase_ ( self : Dict , __lowerCamelCase : int ) -> tuple[cva.Mat, list[list[int]]]: SCREAMING_SNAKE_CASE__ = cva.imread(__lowerCamelCase , 0 ) SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = img.shape SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = img.copy() SCREAMING_SNAKE_CASE__ = cva.cvtColor(__lowerCamelCase , cva.COLOR_GRAY2RGB ) SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = np.gradient(__lowerCamelCase ) SCREAMING_SNAKE_CASE__ = dx**2 SCREAMING_SNAKE_CASE__ = dy**2 SCREAMING_SNAKE_CASE__ = dx * dy SCREAMING_SNAKE_CASE__ = 0.04 SCREAMING_SNAKE_CASE__ = self.window_size // 2 for y in range(__lowerCamelCase , h - offset ): for x in range(__lowerCamelCase , w - offset ): SCREAMING_SNAKE_CASE__ = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() SCREAMING_SNAKE_CASE__ = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() SCREAMING_SNAKE_CASE__ = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() SCREAMING_SNAKE_CASE__ = (wxx * wyy) - (wxy**2) SCREAMING_SNAKE_CASE__ = wxx + wyy SCREAMING_SNAKE_CASE__ = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": _SCREAMING_SNAKE_CASE : List[Any] = HarrisCorner(0.0_4, 3) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = edge_detect.detect('''path_to_image''') cva.imwrite('''detect.png''', color_img)
353
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) _SCREAMING_SNAKE_CASE : str = { '''sample_size''': 32, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': 1000, '''block_out_channels''': [32, 64], '''attention_head_dim''': 8, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } _SCREAMING_SNAKE_CASE : Dict = { '''sample_size''': 64, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 3, '''num_class_embeds''': 1000, '''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } _SCREAMING_SNAKE_CASE : int = { '''sample_size''': 256, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': None, '''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''default''', '''upsample_type''': '''resnet''', '''downsample_type''': 
'''resnet''', } _SCREAMING_SNAKE_CASE : int = { '''num_train_timesteps''': 40, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } _SCREAMING_SNAKE_CASE : str = { '''num_train_timesteps''': 201, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } _SCREAMING_SNAKE_CASE : Tuple = { '''num_train_timesteps''': 151, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } def UpperCAmelCase_ ( _A ): '''simple docstring''' if isinstance(_A , _A ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('''boolean value expected''' ) def UpperCAmelCase_ ( _A , _A , _A , _A , _A=False ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.in_layers.0.weight'''] SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.in_layers.0.bias'''] SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.in_layers.2.weight'''] SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.in_layers.2.bias'''] SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.emb_layers.1.weight'''] SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.emb_layers.1.bias'''] SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.out_layers.0.weight'''] SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.out_layers.0.bias'''] SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.out_layers.3.weight'''] SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.out_layers.3.bias'''] if has_skip: SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.skip_connection.weight'''] SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def UpperCAmelCase_ ( _A , _A , _A , _A , _A=None ): '''simple docstring''' SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 ) SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , 
dim=0 ) SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.norm.weight'''] SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.norm.bias'''] SCREAMING_SNAKE_CASE__ = weight_q.squeeze(-1 ).squeeze(-1 ) SCREAMING_SNAKE_CASE__ = bias_q.squeeze(-1 ).squeeze(-1 ) SCREAMING_SNAKE_CASE__ = weight_k.squeeze(-1 ).squeeze(-1 ) SCREAMING_SNAKE_CASE__ = bias_k.squeeze(-1 ).squeeze(-1 ) SCREAMING_SNAKE_CASE__ = weight_v.squeeze(-1 ).squeeze(-1 ) SCREAMING_SNAKE_CASE__ = bias_v.squeeze(-1 ).squeeze(-1 ) SCREAMING_SNAKE_CASE__ = ( checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) SCREAMING_SNAKE_CASE__ = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def UpperCAmelCase_ ( _A , _A ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = torch.load(_A , map_location='''cpu''' ) SCREAMING_SNAKE_CASE__ = {} SCREAMING_SNAKE_CASE__ = checkpoint['''time_embed.0.weight'''] SCREAMING_SNAKE_CASE__ = checkpoint['''time_embed.0.bias'''] SCREAMING_SNAKE_CASE__ = checkpoint['''time_embed.2.weight'''] SCREAMING_SNAKE_CASE__ = checkpoint['''time_embed.2.bias'''] if unet_config["num_class_embeds"] is not None: SCREAMING_SNAKE_CASE__ = checkpoint['''label_emb.weight'''] SCREAMING_SNAKE_CASE__ = checkpoint['''input_blocks.0.0.weight'''] SCREAMING_SNAKE_CASE__ = checkpoint['''input_blocks.0.0.bias'''] SCREAMING_SNAKE_CASE__ = unet_config['''down_block_types'''] SCREAMING_SNAKE_CASE__ = unet_config['''layers_per_block'''] SCREAMING_SNAKE_CASE__ = unet_config['''attention_head_dim'''] SCREAMING_SNAKE_CASE__ = unet_config['''block_out_channels'''] SCREAMING_SNAKE_CASE__ = 1 SCREAMING_SNAKE_CASE__ = channels_list[0] for i, layer_type in enumerate(_A ): SCREAMING_SNAKE_CASE__ = channels_list[i] SCREAMING_SNAKE_CASE__ = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(_A ): SCREAMING_SNAKE_CASE__ = F'''down_blocks.{i}.resnets.{j}''' SCREAMING_SNAKE_CASE__ = 
F'''input_blocks.{current_layer}.0''' SCREAMING_SNAKE_CASE__ = True if j == 0 and downsample_block_has_skip else False SCREAMING_SNAKE_CASE__ = convert_resnet(_A , _A , _A , _A , has_skip=_A ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(_A ): SCREAMING_SNAKE_CASE__ = F'''down_blocks.{i}.resnets.{j}''' SCREAMING_SNAKE_CASE__ = F'''input_blocks.{current_layer}.0''' SCREAMING_SNAKE_CASE__ = True if j == 0 and downsample_block_has_skip else False SCREAMING_SNAKE_CASE__ = convert_resnet(_A , _A , _A , _A , has_skip=_A ) SCREAMING_SNAKE_CASE__ = F'''down_blocks.{i}.attentions.{j}''' SCREAMING_SNAKE_CASE__ = F'''input_blocks.{current_layer}.1''' SCREAMING_SNAKE_CASE__ = convert_attention( _A , _A , _A , _A , _A ) current_layer += 1 if i != len(_A ) - 1: SCREAMING_SNAKE_CASE__ = F'''down_blocks.{i}.downsamplers.0''' SCREAMING_SNAKE_CASE__ = F'''input_blocks.{current_layer}.0''' SCREAMING_SNAKE_CASE__ = convert_resnet(_A , _A , _A , _A ) current_layer += 1 SCREAMING_SNAKE_CASE__ = current_channels # hardcoded the mid-block for now SCREAMING_SNAKE_CASE__ = '''mid_block.resnets.0''' SCREAMING_SNAKE_CASE__ = '''middle_block.0''' SCREAMING_SNAKE_CASE__ = convert_resnet(_A , _A , _A , _A ) SCREAMING_SNAKE_CASE__ = '''mid_block.attentions.0''' SCREAMING_SNAKE_CASE__ = '''middle_block.1''' SCREAMING_SNAKE_CASE__ = convert_attention(_A , _A , _A , _A , _A ) SCREAMING_SNAKE_CASE__ = '''mid_block.resnets.1''' SCREAMING_SNAKE_CASE__ = '''middle_block.2''' SCREAMING_SNAKE_CASE__ = convert_resnet(_A , _A , _A , _A ) SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = unet_config['''up_block_types'''] for i, layer_type in enumerate(_A ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): SCREAMING_SNAKE_CASE__ = F'''up_blocks.{i}.resnets.{j}''' SCREAMING_SNAKE_CASE__ = F'''output_blocks.{current_layer}.0''' SCREAMING_SNAKE_CASE__ = convert_resnet(_A , _A , _A , _A , has_skip=_A ) current_layer += 1 if i != len(_A ) - 1: 
SCREAMING_SNAKE_CASE__ = F'''up_blocks.{i}.upsamplers.0''' SCREAMING_SNAKE_CASE__ = F'''output_blocks.{current_layer-1}.1''' SCREAMING_SNAKE_CASE__ = convert_resnet(_A , _A , _A , _A ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): SCREAMING_SNAKE_CASE__ = F'''up_blocks.{i}.resnets.{j}''' SCREAMING_SNAKE_CASE__ = F'''output_blocks.{current_layer}.0''' SCREAMING_SNAKE_CASE__ = convert_resnet(_A , _A , _A , _A , has_skip=_A ) SCREAMING_SNAKE_CASE__ = F'''up_blocks.{i}.attentions.{j}''' SCREAMING_SNAKE_CASE__ = F'''output_blocks.{current_layer}.1''' SCREAMING_SNAKE_CASE__ = convert_attention( _A , _A , _A , _A , _A ) current_layer += 1 if i != len(_A ) - 1: SCREAMING_SNAKE_CASE__ = F'''up_blocks.{i}.upsamplers.0''' SCREAMING_SNAKE_CASE__ = F'''output_blocks.{current_layer-1}.2''' SCREAMING_SNAKE_CASE__ = convert_resnet(_A , _A , _A , _A ) SCREAMING_SNAKE_CASE__ = checkpoint['''out.0.weight'''] SCREAMING_SNAKE_CASE__ = checkpoint['''out.0.bias'''] SCREAMING_SNAKE_CASE__ = checkpoint['''out.2.weight'''] SCREAMING_SNAKE_CASE__ = checkpoint['''out.2.bias'''] return new_checkpoint if __name__ == "__main__": _SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''') parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.''' ) parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''') _SCREAMING_SNAKE_CASE : Tuple = parser.parse_args() _SCREAMING_SNAKE_CASE : List[str] = strabool(args.class_cond) _SCREAMING_SNAKE_CASE : int = os.path.basename(args.unet_path) print(F"Checkpoint: {ckpt_name}") # Get U-Net config if "imagenet64" in ckpt_name: _SCREAMING_SNAKE_CASE : Optional[Any] = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): 
_SCREAMING_SNAKE_CASE : int = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: _SCREAMING_SNAKE_CASE : Union[str, Any] = TEST_UNET_CONFIG else: raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.") if not args.class_cond: _SCREAMING_SNAKE_CASE : Union[str, Any] = None _SCREAMING_SNAKE_CASE : int = con_pt_to_diffuser(args.unet_path, unet_config) _SCREAMING_SNAKE_CASE : Optional[int] = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: _SCREAMING_SNAKE_CASE : Optional[Any] = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: _SCREAMING_SNAKE_CASE : Any = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): _SCREAMING_SNAKE_CASE : int = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.") _SCREAMING_SNAKE_CASE : int = CMStochasticIterativeScheduler(**scheduler_config) _SCREAMING_SNAKE_CASE : str = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
218
0
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def a_ ( SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def a_ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[Any] ): '''simple docstring''' _lowerCamelCase : int ={} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _lowerCamelCase : Any =key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' ) _lowerCamelCase : Tuple =key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' ) _lowerCamelCase : Optional[int] =key.replace('heads.cmd.itm_head.cls' , 'itm_head' ) _lowerCamelCase : Tuple =key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' ) _lowerCamelCase : Optional[int] =key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' ) _lowerCamelCase : List[str] =key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' ) _lowerCamelCase : Optional[int] =key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' ) _lowerCamelCase : List[Any] =key.replace('mm_text_projection' , 'flava.text_to_mm_projection' ) _lowerCamelCase : Any =key.replace('mm_image_projection' , 'flava.image_to_mm_projection' ) _lowerCamelCase : Dict =key.replace('image_encoder.module' , 'flava.image_model' ) _lowerCamelCase : Optional[Any] =key.replace('text_encoder.module' , 'flava.text_model' ) _lowerCamelCase : List[str] =key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' ) _lowerCamelCase : str =key.replace('mm_encoder.module' , 'flava.multimodal_model' ) _lowerCamelCase : Optional[int] =key.replace('text_projection' , 'flava.text_projection' ) _lowerCamelCase : str =key.replace('image_projection' , 
'flava.image_projection' ) _lowerCamelCase : Dict =value.float() for key, value in codebook_state_dict.items(): _lowerCamelCase : int =value return upgrade @torch.no_grad() def a_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]=None ): '''simple docstring''' if config_path is not None: _lowerCamelCase : Tuple =FlavaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: _lowerCamelCase : Optional[int] =FlavaConfig() _lowerCamelCase : int =FlavaForPreTraining(SCREAMING_SNAKE_CASE__ ).eval() _lowerCamelCase : List[Any] =convert_dalle_checkpoint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , save_checkpoint=SCREAMING_SNAKE_CASE__ ) if os.path.exists(SCREAMING_SNAKE_CASE__ ): _lowerCamelCase : List[Any] =torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' ) else: _lowerCamelCase : List[Any] =torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='cpu' ) _lowerCamelCase : Tuple =upgrade_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) hf_model.load_state_dict(SCREAMING_SNAKE_CASE__ ) _lowerCamelCase : str =hf_model.state_dict() _lowerCamelCase : Dict =count_parameters(SCREAMING_SNAKE_CASE__ ) _lowerCamelCase : int =count_parameters(SCREAMING_SNAKE_CASE__ ) + count_parameters(SCREAMING_SNAKE_CASE__ ) assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": lowerCamelCase = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') 
lowerCamelCase = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
199
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
199
1
'''simple docstring''' class a__ : def __init__( self : List[Any] ): """simple docstring""" __lowerCamelCase = {} def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" print(self.vertex ) for i in self.vertex: print(a , ''' -> ''' , ''' -> '''.join([str(a ) for j in self.vertex[i]] ) ) def SCREAMING_SNAKE_CASE__ ( self : Any , a : int , a : int ): """simple docstring""" if from_vertex in self.vertex: self.vertex[from_vertex].append(a ) else: # else make a new vertex __lowerCamelCase = [to_vertex] def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(a , a ) def SCREAMING_SNAKE_CASE__ ( self : int , a : int , a : list ): """simple docstring""" __lowerCamelCase = True print(a , end=''' ''' ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(a , a ) if __name__ == "__main__": __UpperCAmelCase = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print("DFS:") g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
353
'''simple docstring''' import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class a__ ( UpperCAmelCase__ ): lowerCamelCase : Dict =(DPMSolverSDEScheduler,) lowerCamelCase : List[str] =1_0 def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , **a : Optional[int] ): """simple docstring""" __lowerCamelCase = { '''num_train_timesteps''': 11_00, '''beta_start''': 0.00_01, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**a ) return config def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" for timesteps in [10, 50, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=a ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ): self.check_over_configs(beta_start=a , beta_end=a ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=a ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=a ) def SCREAMING_SNAKE_CASE__ ( self : Tuple ): """simple docstring""" __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**a ) scheduler.set_timesteps(self.num_inference_steps ) __lowerCamelCase = self.dummy_model() __lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma __lowerCamelCase = sample.to(a ) for i, t in enumerate(scheduler.timesteps ): __lowerCamelCase = scheduler.scale_model_input(a , a ) __lowerCamelCase = model(a , a ) __lowerCamelCase = scheduler.step(a , a , a ) __lowerCamelCase = output.prev_sample __lowerCamelCase = 
torch.sum(torch.abs(a ) ) __lowerCamelCase = torch.mean(torch.abs(a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1e-2 assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1e-2 assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52 ) < 1e-3 else: assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2 assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config(prediction_type='''v_prediction''' ) __lowerCamelCase = scheduler_class(**a ) scheduler.set_timesteps(self.num_inference_steps ) __lowerCamelCase = self.dummy_model() __lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma __lowerCamelCase = sample.to(a ) for i, t in enumerate(scheduler.timesteps ): __lowerCamelCase = scheduler.scale_model_input(a , a ) __lowerCamelCase = model(a , a ) __lowerCamelCase = scheduler.step(a , a , a ) __lowerCamelCase = output.prev_sample __lowerCamelCase = torch.sum(torch.abs(a ) ) __lowerCamelCase = torch.mean(torch.abs(a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1e-2 assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1e-2 assert abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97 ) < 1e-3 else: assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1e-2 assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**a ) scheduler.set_timesteps(self.num_inference_steps , 
device=a ) __lowerCamelCase = self.dummy_model() __lowerCamelCase = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma for t in scheduler.timesteps: __lowerCamelCase = scheduler.scale_model_input(a , a ) __lowerCamelCase = model(a , a ) __lowerCamelCase = scheduler.step(a , a , a ) __lowerCamelCase = output.prev_sample __lowerCamelCase = torch.sum(torch.abs(a ) ) __lowerCamelCase = torch.mean(torch.abs(a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1e-2 assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1e-2 assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71 ) < 1e-3 else: assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2 assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26 ) < 1e-3 def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase = self.scheduler_classes[0] __lowerCamelCase = self.get_scheduler_config() __lowerCamelCase = scheduler_class(**a , use_karras_sigmas=a ) scheduler.set_timesteps(self.num_inference_steps , device=a ) __lowerCamelCase = self.dummy_model() __lowerCamelCase = self.dummy_sample_deter.to(a ) * scheduler.init_noise_sigma __lowerCamelCase = sample.to(a ) for t in scheduler.timesteps: __lowerCamelCase = scheduler.scale_model_input(a , a ) __lowerCamelCase = model(a , a ) __lowerCamelCase = scheduler.step(a , a , a ) __lowerCamelCase = output.prev_sample __lowerCamelCase = torch.sum(torch.abs(a ) ) __lowerCamelCase = torch.mean(torch.abs(a ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1e-2 assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1e-2 assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2 else: assert abs(result_sum.item() - 
1_70.3_13_52_23_38_86_72 ) < 1e-2 assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11 ) < 1e-2
237
0
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def lowerCAmelCase_ ( snake_case_ : int ) -> List[str]: '''simple docstring''' if "img_encoder.pos_embed" in name: UpperCAmelCase_ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" ) if "img_encoder.patch_embed.proj" in name: UpperCAmelCase_ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" ) if "img_encoder.patch_embed.norm" in name: UpperCAmelCase_ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" ) if "img_encoder.layers" in name: UpperCAmelCase_ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" ) if "blocks" in name and "res" not in name: UpperCAmelCase_ = name.replace("blocks" , "layers" ) if "attn" in name and "pre_assign" not in name: UpperCAmelCase_ = name.replace("attn" , "self_attn" ) if "proj" in name and "self_attn" in name and "text" not in name: UpperCAmelCase_ = name.replace("proj" , "out_proj" ) if "pre_assign_attn.attn.proj" in name: UpperCAmelCase_ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" ) if "norm1" in name: UpperCAmelCase_ = name.replace("norm1" , "layer_norm1" ) if "norm2" in name and "pre_assign" not in name: UpperCAmelCase_ = name.replace("norm2" , "layer_norm2" ) if "img_encoder.norm" in name: UpperCAmelCase_ = name.replace("img_encoder.norm" , "vision_model.layernorm" ) # text encoder if "text_encoder.token_embedding" in name: UpperCAmelCase_ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" ) if "text_encoder.positional_embedding" in name: UpperCAmelCase_ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" ) if "text_encoder.transformer.resblocks." 
in name: UpperCAmelCase_ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." ) if "ln_1" in name: UpperCAmelCase_ = name.replace("ln_1" , "layer_norm1" ) if "ln_2" in name: UpperCAmelCase_ = name.replace("ln_2" , "layer_norm2" ) if "c_fc" in name: UpperCAmelCase_ = name.replace("c_fc" , "fc1" ) if "c_proj" in name: UpperCAmelCase_ = name.replace("c_proj" , "fc2" ) if "text_encoder" in name: UpperCAmelCase_ = name.replace("text_encoder" , "text_model" ) if "ln_final" in name: UpperCAmelCase_ = name.replace("ln_final" , "final_layer_norm" ) # projection layers if "img_projector.linear_hidden." in name: UpperCAmelCase_ = name.replace("img_projector.linear_hidden." , "visual_projection." ) if "img_projector.linear_out." in name: UpperCAmelCase_ = name.replace("img_projector.linear_out." , "visual_projection.3." ) if "text_projector.linear_hidden" in name: UpperCAmelCase_ = name.replace("text_projector.linear_hidden" , "text_projection" ) if "text_projector.linear_out" in name: UpperCAmelCase_ = name.replace("text_projector.linear_out" , "text_projection.3" ) return name def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : List[str] ) -> Union[str, Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase_ = orig_state_dict.pop(snake_case_ ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors UpperCAmelCase_ = key.split("." 
) UpperCAmelCase_ , UpperCAmelCase_ = int(key_split[2] ), int(key_split[4] ) UpperCAmelCase_ = config.vision_config.hidden_size if "weight" in key: UpperCAmelCase_ = val[:dim, :] UpperCAmelCase_ = val[dim : dim * 2, :] UpperCAmelCase_ = val[-dim:, :] else: UpperCAmelCase_ = val[:dim] UpperCAmelCase_ = val[dim : dim * 2] UpperCAmelCase_ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors UpperCAmelCase_ = key.split("." ) UpperCAmelCase_ = int(key_split[3] ) UpperCAmelCase_ = config.text_config.hidden_size if "weight" in key: UpperCAmelCase_ = val[:dim, :] UpperCAmelCase_ = val[ dim : dim * 2, : ] UpperCAmelCase_ = val[-dim:, :] else: UpperCAmelCase_ = val[:dim] UpperCAmelCase_ = val[dim : dim * 2] UpperCAmelCase_ = val[-dim:] else: UpperCAmelCase_ = rename_key(snake_case_ ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): UpperCAmelCase_ = val.squeeze_() else: UpperCAmelCase_ = val return orig_state_dict def lowerCAmelCase_ ( ) -> Any: '''simple docstring''' UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : List[Any] , snake_case_ : Optional[Any]="groupvit-gcc-yfcc" , snake_case_ : List[str]=False ) -> Any: '''simple docstring''' UpperCAmelCase_ = GroupViTConfig() UpperCAmelCase_ = GroupViTModel(snake_case_ ).eval() UpperCAmelCase_ = torch.load(snake_case_ , map_location="cpu" )["model"] UpperCAmelCase_ = convert_state_dict(snake_case_ , snake_case_ ) UpperCAmelCase_ , UpperCAmelCase_ = model.load_state_dict(snake_case_ , strict=snake_case_ ) assert missing_keys == 
["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(snake_case_ ) == 0) # verify result UpperCAmelCase_ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" ) UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = processor(text=["a photo of a cat", "a photo of a dog"] , images=snake_case_ , padding=snake_case_ , return_tensors="pt" ) with torch.no_grad(): UpperCAmelCase_ = model(**snake_case_ ) if model_name == "groupvit-gcc-yfcc": UpperCAmelCase_ = torch.tensor([[13.3523, 6.3629]] ) elif model_name == "groupvit-gcc-redcaps": UpperCAmelCase_ = torch.tensor([[16.1873, 8.6230]] ) else: raise ValueError(f"""Model name {model_name} not supported.""" ) assert torch.allclose(outputs.logits_per_image , snake_case_ , atol=1E-3 ) processor.save_pretrained(snake_case_ ) model.save_pretrained(snake_case_ ) print("Successfully saved processor and model to" , snake_case_ ) if push_to_hub: print("Pushing to the hub..." ) processor.push_to_hub(snake_case_ , organization="nielsr" ) model.push_to_hub(snake_case_ , organization="nielsr" ) if __name__ == "__main__": SCREAMING_SNAKE_CASE_: Dict =argparse.ArgumentParser() parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.' ) parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint') parser.add_argument( '--model_name', default='groupvit-gccy-fcc', type=str, help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.', ) SCREAMING_SNAKE_CASE_: Optional[int] =parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
1
"""Prefix-sum utilities (reconstructed from an obfuscated original)."""


class snake_case:
    """Prefix-sum index over a list of integers.

    Precomputing running sums gives O(1) inclusive range-sum queries and an
    O(n) check for whether any contiguous subarray sums to a target value.
    """

    def __init__(self, _lowerCamelCase: list[int]) -> None:
        """Build ``self.prefix_sum`` where entry ``i`` is ``sum(array[: i + 1])``.

        Fixes: the original assigned every result to a throwaway local (so
        ``self.prefix_sum`` was never set) and referenced the undefined names
        ``len_array`` and ``array``.
        """
        length = len(_lowerCamelCase)
        self.prefix_sum = [0] * length
        if length > 0:
            self.prefix_sum[0] = _lowerCamelCase[0]
            for i in range(1, length):
                self.prefix_sum[i] = self.prefix_sum[i - 1] + _lowerCamelCase[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of the inclusive slice [start, end] in O(1).

        Renamed from ``_SCREAMING_SNAKE_CASE``: the original gave both public
        methods the same name, so this one was shadowed and unreachable (and
        its duplicated parameter names were a SyntaxError).
        """
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to ``target_sum``.

        Classic prefix-sum trick: such a subarray exists iff two prefix sums
        (counting the implicit leading 0) differ by exactly ``target_sum``.
        """
        seen = {0}
        for prefix in self.prefix_sum:
            if prefix - target_sum in seen:
                return True
            seen.add(prefix)
        return False

    # Backward-compatible alias: in the original, the last definition bound to
    # `_SCREAMING_SNAKE_CASE` (this subarray-sum check) was the survivor.
    _SCREAMING_SNAKE_CASE = contains_sum


if __name__ == "__main__":
    import doctest

    doctest.testmod()
266
0
"""simple docstring""" def _lowercase ( __snake_case = 4_000_000 ) -> int: __lowerCAmelCase : Any = [0, 1] __lowerCAmelCase : Dict = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 __lowerCAmelCase : Dict = 0 for j in range(len(__snake_case ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(F"""{solution() = }""")
58
"""simple docstring""" def _lowercase ( __snake_case ) -> int: if not isinstance(__snake_case ,__snake_case ): raise ValueError("Input must be an integer" ) if input_num <= 0: raise ValueError("Input must be positive" ) return sum( divisor for divisor in range(1 ,input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
58
1
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer A =['bert-base-uncased', 'bert-base-cased'] A ='hf-internal-testing/tiny-bert-tf-only' if is_tf_available(): class _a ( tf.keras.Model ): def __init__( self : Optional[int] , lowercase : List[Any] ): '''simple docstring''' super().__init__() UpperCAmelCase = tokenizer UpperCAmelCase = AutoConfig.from_pretrained(lowercase ) UpperCAmelCase = TFAutoModel.from_config(lowercase ) def A ( self : List[str] , lowercase : Dict ): '''simple docstring''' UpperCAmelCase = self.tokenizer(lowercase ) UpperCAmelCase = self.bert(**lowercase ) return out["pooler_output"] @require_tf @require_tensorflow_text class _a ( unittest.TestCase ): def A ( self : Tuple ): '''simple docstring''' super().setUp() UpperCAmelCase = [ BertTokenizer.from_pretrained(lowercase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false UpperCAmelCase = [TFBertTokenizer.from_pretrained(lowercase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(lowercase , use_fast_bert_tokenizer=lowercase ) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers ) == len(self.tf_tokenizers ) UpperCAmelCase = [ '''This is a straightforward English test sentence.''', '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''', '''Now we\'re going to add some Chinese: 一 二 三 一二三''', '''And some much more rare Chinese: 齉 堃 齉堃''', '''Je vais aussi écrire en français pour tester les accents''', '''Classical Irish also has some unusual characters, so in 
they go: Gaelaċ, ꝼ''', ] UpperCAmelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def A ( self : Optional[int] ): '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in (self.test_sentences, self.paired_sentences): UpperCAmelCase = tokenizer(lowercase , return_tensors='''tf''' , padding='''longest''' ) UpperCAmelCase = tf_tokenizer(lowercase ) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) ) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) ) @slow def A ( self : Union[str, Any] ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase = tf_tokenizer(self.paired_sentences ) UpperCAmelCase = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) ) @slow def A ( self : List[str] ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase = tf.function(lowercase ) for test_inputs in (self.test_sentences, self.paired_sentences): UpperCAmelCase = tf.constant(lowercase ) UpperCAmelCase = compiled_tokenizer(lowercase ) UpperCAmelCase = tf_tokenizer(lowercase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def A ( self : List[Any] ): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: UpperCAmelCase = ModelToSave(tokenizer=lowercase ) UpperCAmelCase = tf.convert_to_tensor(self.test_sentences ) UpperCAmelCase = model(lowercase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: UpperCAmelCase = Path(lowercase ) / '''saved.model''' model.save(lowercase ) UpperCAmelCase = tf.keras.models.load_model(lowercase ) 
UpperCAmelCase = loaded_model(lowercase ) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
34
"""simple docstring""" import os import sys a :Union[str, Any] = os.path.join(os.path.dirname(__file__), "src") sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) a :int = [ "torch", "numpy", "tokenizers", "filelock", "requests", "tqdm", "regex", "sentencepiece", "sacremoses", "importlib_metadata", "huggingface_hub", ] @add_start_docstrings(AutoConfig.__doc__ ) def _lowercase ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Optional[Any]: return AutoConfig.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase ) @add_start_docstrings(AutoTokenizer.__doc__ ) def _lowercase ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]: return AutoTokenizer.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase ) @add_start_docstrings(AutoModel.__doc__ ) def _lowercase ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Dict: return AutoModel.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def _lowercase ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Optional[int]: return AutoModelForCausalLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def _lowercase ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[str]: return AutoModelForMaskedLM.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def _lowercase ( *__lowerCAmelCase , **__lowerCAmelCase ) -> str: return AutoModelForSequenceClassification.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def _lowercase ( *__lowerCAmelCase , **__lowerCAmelCase ) -> int: return AutoModelForQuestionAnswering.from_pretrained(*__lowerCAmelCase , **__lowerCAmelCase )
132
0
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class __lowercase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = (PNDMScheduler,) __lowerCAmelCase = (('''num_inference_steps''', 50),) def _lowerCamelCase ( self , **_UpperCAmelCase ): __a : Union[str, Any] = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', } config.update(**_UpperCAmelCase ) return config def _lowerCamelCase ( self , _UpperCAmelCase=0 , **_UpperCAmelCase ): __a : str = dict(self.forward_default_kwargs ) __a : str = kwargs.pop('''num_inference_steps''' , _UpperCAmelCase ) __a : int = self.dummy_sample __a : Any = 0.1 * sample __a : Dict = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: __a : int = self.get_scheduler_config(**_UpperCAmelCase ) __a : str = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals __a : List[str] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) __a : Optional[int] = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals __a : str = dummy_past_residuals[:] __a : Dict = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample __a : Optional[Any] = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" __a : Optional[Any] = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample __a : Union[str, Any] = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase 
).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _lowerCamelCase ( self ): pass def _lowerCamelCase ( self , _UpperCAmelCase=0 , **_UpperCAmelCase ): __a : Optional[Any] = dict(self.forward_default_kwargs ) __a : str = kwargs.pop('''num_inference_steps''' , _UpperCAmelCase ) __a : List[str] = self.dummy_sample __a : Optional[Any] = 0.1 * sample __a : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: __a : List[Any] = self.get_scheduler_config() __a : Dict = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) __a : Tuple = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) __a : Any = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) __a : Optional[int] = dummy_past_residuals[:] __a : List[Any] = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample __a : Optional[Any] = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" __a : Union[str, Any] = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample __a : int = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def _lowerCamelCase ( self , **_UpperCAmelCase ): __a : Optional[int] = self.scheduler_classes[0] __a : Any = self.get_scheduler_config(**_UpperCAmelCase ) __a : 
Union[str, Any] = scheduler_class(**_UpperCAmelCase ) __a : List[str] = 10 __a : Optional[int] = self.dummy_model() __a : Optional[Any] = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.prk_timesteps ): __a : Union[str, Any] = model(_UpperCAmelCase , _UpperCAmelCase ) __a : str = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): __a : Union[str, Any] = model(_UpperCAmelCase , _UpperCAmelCase ) __a : Tuple = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample return sample def _lowerCamelCase ( self ): __a : Any = dict(self.forward_default_kwargs ) __a : int = kwargs.pop('''num_inference_steps''' , _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: __a : Any = self.get_scheduler_config() __a : Any = scheduler_class(**_UpperCAmelCase ) __a : int = self.dummy_sample __a : str = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase , '''set_timesteps''' ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , '''set_timesteps''' ): __a : Optional[int] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __a : Any = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] __a : Optional[Any] = dummy_past_residuals[:] __a : Dict = scheduler.step_prk(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample __a : Union[str, Any] = scheduler.step_prk(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) __a : Dict = scheduler.step_plms(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample __a : Any = scheduler.step_plms(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample 
self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _lowerCamelCase ( self ): for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def _lowerCamelCase ( self ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_UpperCAmelCase ) __a : List[Any] = self.scheduler_classes[0] __a : Tuple = self.get_scheduler_config(steps_offset=1 ) __a : int = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def _lowerCamelCase ( self ): for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase ) def _lowerCamelCase ( self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def _lowerCamelCase ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def _lowerCamelCase ( self ): for t in [1, 5, 10]: self.check_over_forward(time_step=_UpperCAmelCase ) def _lowerCamelCase ( self ): for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase ) def _lowerCamelCase ( self ): # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 __a : int = 27 for scheduler_class in self.scheduler_classes: __a : Union[str, Any] = self.dummy_sample __a : Tuple = 0.1 * sample __a : Union[str, Any] = self.get_scheduler_config() __a : Union[str, Any] = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): __a : Tuple = scheduler.step_prk(_UpperCAmelCase , 
_UpperCAmelCase , _UpperCAmelCase ).prev_sample def _lowerCamelCase ( self ): with self.assertRaises(_UpperCAmelCase ): __a : Optional[int] = self.scheduler_classes[0] __a : Optional[int] = self.get_scheduler_config() __a : Tuple = scheduler_class(**_UpperCAmelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def _lowerCamelCase ( self ): __a : Union[str, Any] = self.full_loop() __a : Optional[Any] = torch.sum(torch.abs(_UpperCAmelCase ) ) __a : int = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2 assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3 def _lowerCamelCase ( self ): __a : List[str] = self.full_loop(prediction_type='''v_prediction''' ) __a : Optional[Any] = torch.sum(torch.abs(_UpperCAmelCase ) ) __a : Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2 assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3 def _lowerCamelCase ( self ): # We specify different beta, so that the first alpha is 0.99 __a : Optional[int] = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.0_1 ) __a : List[Any] = torch.sum(torch.abs(_UpperCAmelCase ) ) __a : Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2 assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3 def _lowerCamelCase ( self ): # We specify different beta, so that the first alpha is 0.99 __a : Any = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.0_1 ) __a : str = torch.sum(torch.abs(_UpperCAmelCase ) ) __a : Optional[int] = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2 assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
188
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class __lowercase : '''simple docstring''' @staticmethod def _lowerCamelCase ( *_UpperCAmelCase , **_UpperCAmelCase ): pass def __A ( a_ :Image) -> str: __a : List[str] = hashlib.mda(image.tobytes()) return m.hexdigest()[:10] def __A ( a_ :Image) -> Dict: __a : Any = np.array(a_) __a : Tuple = npimg.shape return {"hash": hashimage(a_), "shape": shape} @is_pipeline_test @require_vision @require_torch class __lowercase ( unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __lowerCAmelCase = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : List[str] = MaskGenerationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ): pass @require_tf @unittest.skip('''Image segmentation not implemented in TF''' ) def _lowerCamelCase ( self ): pass @slow @require_torch def _lowerCamelCase ( self ): __a : Dict = pipeline('''mask-generation''' , model='''facebook/sam-vit-huge''' ) __a : Optional[Any] = image_segmenter('''http://images.cocodataset.org/val2017/000000039769.jpg''' , points_per_batch=256 ) # Shortening by hashing __a : Optional[int] = [] 
for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_4_4_4}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_2_1}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_1_6_7}, {'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_1_3_2}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_0_5_3}, {'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_9_6_7}, {'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.9_9_3}, {'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_9_0_9}, {'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_8_7_9}, {'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_8_3_4}, {'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_7_1_6}, {'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_6_1_2}, {'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_5_9_9}, {'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_5_5_2}, {'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_5_3_2}, {'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_5_1_6}, {'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_4_9_9}, {'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_4_8_3}, {'''mask''': {'''hash''': '''c290813fb9''', 
'''shape''': (480, 640)}, '''scores''': 0.9_4_6_4}, {'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.9_4_3}, {'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.9_4_3}, {'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_4_0_8}, {'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_3_3_5}, {'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_3_2_6}, {'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_2_6_2}, {'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_9_9_9}, {'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_9_8_6}, {'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_9_8_4}, {'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_8_7_3}, {'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_8_7_1} ] , ) # fmt: on @require_torch @slow def _lowerCamelCase ( self ): __a : Dict = '''facebook/sam-vit-huge''' __a : Tuple = pipeline('''mask-generation''' , model=_UpperCAmelCase ) __a : List[Any] = image_segmenter( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __a : Optional[int] = [] for i, o in enumerate(outputs['''masks'''] ): new_outupt += [{"mask": mask_to_test_readable(_UpperCAmelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(_UpperCAmelCase , decimals=4 ) , [ {'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_4_4_4}, {'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_2_1_0}, {'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_1_6_7}, {'''mask''': {'''hash''': 
'''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_1_3_2}, {'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_0_5_3}, ] , )
188
1
"""Lazy import structure for the OWL-ViT model (reconstructed from an obfuscated original)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Submodule -> public names it provides.  The obfuscated original rebound a
# single throwaway variable for every entry and then passed the *undefined*
# name `_import_structure` to _LazyModule; this restores the dict that the
# final call actually expects.  (The unimported `Optional`/`Union` names in
# the original's runtime-evaluated annotations raised NameError on import.)
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

# Vision-dependent members are only registered when the vision extras exist.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

# Torch-dependent members.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )
else:
    import sys

    # ...while at runtime the module is replaced by a lazy proxy.  The
    # original assigned the proxy to a plain variable, leaving the real
    # (empty) module registered; install it in sys.modules instead -- the
    # otherwise-unused `import sys` above shows that was the intent.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
324
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __lowerCamelCase : str = logging.get_logger(__name__) __lowerCamelCase : str = { """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""", } class A__ ( __snake_case , __snake_case ): _UpperCAmelCase :Optional[int] = 'convnextv2' def __init__( self , A_=3 , A_=4 , A_=4 , A_=None , A_=None , A_="gelu" , A_=0.02 , A_=1e-12 , A_=0.0 , A_=224 , A_=None , A_=None , **A_ , ): '''simple docstring''' super().__init__(**A_ ) UpperCamelCase : Dict = num_channels UpperCamelCase : Union[str, Any] = patch_size UpperCamelCase : Union[str, Any] = num_stages UpperCamelCase : List[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes UpperCamelCase : List[str] = [3, 3, 9, 3] if depths is None else depths UpperCamelCase : Dict = hidden_act UpperCamelCase : Union[str, Any] = initializer_range UpperCamelCase : Tuple = layer_norm_eps UpperCamelCase : str = drop_path_rate UpperCamelCase : List[str] = image_size UpperCamelCase : List[str] = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] UpperCamelCase , UpperCamelCase : str = get_aligned_output_features_output_indices( out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
52
0
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __a :int = get_tests_dir('fixtures/test_sentencepiece_no_bos.model') @require_sentencepiece @require_tokenizers class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : int = PegasusTokenizer _lowerCamelCase : Dict = PegasusTokenizerFast _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : str = True def __A ( self : Dict ): super().setUp() # We have a SentencePiece fixture for testing A_ = PegasusTokenizer(UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __A ( self : str ): return PegasusTokenizer.from_pretrained("google/pegasus-large" ) def __A ( self : List[Any] , **UpperCAmelCase : Tuple ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : Optional[Any] , UpperCAmelCase : Dict ): return ("This is a test", "This is a test") def __A ( self : Optional[int] ): A_ = "</s>" A_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase ) def __A ( self : Dict ): A_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<pad>" ) self.assertEqual(vocab_keys[1] , "</s>" ) self.assertEqual(vocab_keys[-1] , "v" ) self.assertEqual(len(UpperCAmelCase ) , 1103 ) def __A ( self : Union[str, Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __A ( self : Dict ): A_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) A_ = self.tokenizer_class.from_pretrained(self.tmpdirname ) A_ = ( "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was 
important" " </s> <pad> <pad> <pad>" ) A_ = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase , add_special_tokens=UpperCAmelCase ).input_ids[0] A_ = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase , add_special_tokens=UpperCAmelCase ).input_ids[0] self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Dict ): A_ = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word A_ = "<mask_1> To ensure a <mask_2> flow of bank resolutions." A_ = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1] A_ = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase ).input_ids[0] self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) def __A ( self : Union[str, Any] ): A_ = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 96103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 A_ = "To ensure a smooth flow of bank resolutions." A_ = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1] A_ = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase ).input_ids[0] self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __A ( self : str ): A_ = ["This is going to be way too long." 
* 150, "short example"] A_ = ["not super long but more than 5 tokens", "tiny"] A_ = self._large_tokenizer(UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors="pt" ) A_ = self._large_tokenizer( text_target=UpperCAmelCase , max_length=5 , padding=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors="pt" ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(UpperCAmelCase ) == 2 # input_ids, attention_mask. @slow def __A ( self : Optional[Any] ): # fmt: off A_ = {"input_ids": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , ) @require_sentencepiece @require_tokenizers class _a ( snake_case_ , unittest.TestCase ): """simple docstring""" _lowerCamelCase : Union[str, Any] = PegasusTokenizer _lowerCamelCase : int = PegasusTokenizerFast _lowerCamelCase : Tuple = True _lowerCamelCase : int = True def __A ( self : Dict ): super().setUp() # We have a SentencePiece fixture for testing A_ = PegasusTokenizer(UpperCAmelCase , offset=0 , mask_token_sent=UpperCAmelCase , mask_token="[MASK]" ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __A ( self : Optional[int] ): return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" ) def __A ( self : Union[str, Any] , **UpperCAmelCase : Optional[Any] ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def __A ( self : List[str] , UpperCAmelCase : List[str] ): return ("This is a test", "This is a test") def __A ( self : Optional[Any] ): A_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) A_ = self.tokenizer_class.from_pretrained(self.tmpdirname ) A_ = ( "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>" " <pad> <pad> <pad>" ) A_ = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase , add_special_tokens=UpperCAmelCase ).input_ids[0] A_ = 
py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase , add_special_tokens=UpperCAmelCase ).input_ids[0] self.assertListEqual(UpperCAmelCase , UpperCAmelCase ) @require_torch def __A ( self : List[Any] ): A_ = ["This is going to be way too long." * 1000, "short example"] A_ = ["not super long but more than 5 tokens", "tiny"] A_ = self._large_tokenizer(UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors="pt" ) A_ = self._large_tokenizer( text_target=UpperCAmelCase , max_length=5 , padding=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors="pt" ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(UpperCAmelCase ) == 2 # input_ids, attention_mask. def __A ( self : Union[str, Any] ): A_ = ( "This is an example string that is used to test the original TF implementation against the HF" " implementation" ) A_ = self._large_tokenizer(UpperCAmelCase ).input_ids self.assertListEqual( UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
329
import math __a :Union[str, Any] = 10 __a :Union[str, Any] = 7 __a :int = BALLS_PER_COLOUR * NUM_COLOURS def __snake_case ( __UpperCamelCase : int = 20 ): """simple docstring""" A_ = math.comb(__UpperCamelCase ,__UpperCamelCase ) A_ = math.comb(NUM_BALLS - BALLS_PER_COLOUR ,__UpperCamelCase ) A_ = NUM_COLOURS * (1 - missing_colour / total) return f'''{result:.9f}''' if __name__ == "__main__": print(solution(20))
329
1
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class A : def __init__(self , lowerCAmelCase , lowerCAmelCase=1_4 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=0.02 , ): __lowercase= parent __lowercase= batch_size __lowercase= seq_length __lowercase= is_training __lowercase= use_input_mask __lowercase= use_token_type_ids __lowercase= use_labels __lowercase= vocab_size __lowercase= hidden_size __lowercase= rotary_dim __lowercase= num_hidden_layers __lowercase= num_attention_heads __lowercase= intermediate_size __lowercase= hidden_act __lowercase= hidden_dropout_prob __lowercase= attention_probs_dropout_prob __lowercase= max_position_embeddings __lowercase= initializer_range __lowercase= None __lowercase= vocab_size - 1 __lowercase= vocab_size - 1 __lowercase= vocab_size - 1 def _A (self ): __lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase= None if self.use_input_mask: __lowercase= random_attention_mask([self.batch_size, self.seq_length] ) __lowercase= GPTJConfig( 
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def _A (self ): __lowercase= self.prepare_config_and_inputs() __lowercase, __lowercase, __lowercase= config_and_inputs __lowercase= {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= 2_0 __lowercase= model_class_name(UpperCamelCase__ ) __lowercase= model.init_cache(input_ids.shape[0] , UpperCamelCase__ ) __lowercase= jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' ) __lowercase= jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowercase= model( input_ids[:, :-1] , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , position_ids=UpperCamelCase__ , ) __lowercase= jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowercase= model( input_ids[:, -1:] , attention_mask=UpperCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCamelCase__ , ) __lowercase= model(UpperCamelCase__ ) __lowercase= np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' ) def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): __lowercase= 2_0 __lowercase= model_class_name(UpperCamelCase__ ) __lowercase= jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) __lowercase= model.init_cache(input_ids.shape[0] , UpperCamelCase__ ) __lowercase= jnp.broadcast_to( 
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowercase= model( input_ids[:, :-1] , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , position_ids=UpperCamelCase__ , ) __lowercase= jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowercase= model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCamelCase__ , position_ids=UpperCamelCase__ , ) __lowercase= model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) __lowercase= np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' ) @require_flax class A ( _a , _a , unittest.TestCase ): UpperCamelCase_ : List[Any] =(FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () UpperCamelCase_ : Tuple =(FlaxGPTJForCausalLM,) if is_flax_available() else () def _A (self ): __lowercase= FlaxGPTJModelTester(self ) def _A (self ): for model_class_name in self.all_model_classes: __lowercase, __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _A (self ): for model_class_name in self.all_model_classes: __lowercase, __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) @tooslow def _A (self ): __lowercase= GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' ) __lowercase= tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=UpperCamelCase__ , truncation=UpperCamelCase__ ) __lowercase= FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) __lowercase= False __lowercase= model.config.eos_token_id __lowercase= jax.jit(model.generate ) __lowercase= 
jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences __lowercase= tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) __lowercase= [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 'Hey, I\'m a little late to the party. I\'m going to', ] self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) @is_pt_flax_cross_test def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowercase= self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) __lowercase= {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowercase= model_class.__name__[4:] # Skip the "Flax" at the beginning __lowercase= getattr(UpperCamelCase__ , UpperCamelCase__ ) __lowercase, __lowercase= pt_inputs['input_ids'].shape __lowercase= np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCamelCase__ ): __lowercase= 0 __lowercase= 1 __lowercase= 0 __lowercase= 1 __lowercase= pt_model_class(UpperCamelCase__ ).eval() __lowercase= model_class(UpperCamelCase__ , dtype=jnp.floataa ) __lowercase= convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCamelCase__ ) __lowercase= fx_state with torch.no_grad(): __lowercase= pt_model(**UpperCamelCase__ ).to_tuple() __lowercase= fx_model(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(UpperCamelCase__ ) __lowercase= 
model_class.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ ) __lowercase= fx_model_loaded(**UpperCamelCase__ ).to_tuple() self.assertEqual( len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def _A (self ): __lowercase, __lowercase= self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowercase= self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) __lowercase= {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowercase= model_class.__name__[4:] # Skip the "Flax" at the beginning __lowercase= getattr(UpperCamelCase__ , UpperCamelCase__ ) __lowercase= pt_model_class(UpperCamelCase__ ).eval() __lowercase= model_class(UpperCamelCase__ , dtype=jnp.floataa ) __lowercase= load_flax_weights_in_pytorch_model(UpperCamelCase__ , fx_model.params ) __lowercase, __lowercase= pt_inputs['input_ids'].shape __lowercase= np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(UpperCamelCase__ ): __lowercase= 0 __lowercase= 1 __lowercase= 0 __lowercase= 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): __lowercase= pt_model(**UpperCamelCase__ ).to_tuple() __lowercase= fx_model(**UpperCamelCase__ ).to_tuple() self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(UpperCamelCase__ ) __lowercase= 
pt_model_class.from_pretrained(UpperCamelCase__ , from_flax=UpperCamelCase__ ) with torch.no_grad(): __lowercase= pt_model_loaded(**UpperCamelCase__ ).to_tuple() self.assertEqual( len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def _A (self ): for model_class_name in self.all_model_classes: __lowercase= model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) __lowercase= model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase__ )
295
'''simple docstring''' def __lowerCamelCase ( A__ = 50 ) -> int: """simple docstring""" UpperCamelCase = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(f'''{solution() = }''')
28
0
def UpperCamelCase ( snake_case__ : str ) -> str: UpperCamelCase : str = 0 # if input_string is "aba" than new_input_string become "a|b|a" UpperCamelCase : str = '' UpperCamelCase : Dict = '' # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(snake_case__ ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring UpperCamelCase , UpperCamelCase : Tuple = 0, 0 # length[i] shows the length of palindromic substring with center i UpperCamelCase : Tuple = [1 for i in range(len(snake_case__ ) )] # for each character in new_string find corresponding palindromic string UpperCamelCase : Dict = 0 for j in range(len(snake_case__ ) ): UpperCamelCase : Dict = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(snake_case__ ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 UpperCamelCase : Optional[Any] = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: UpperCamelCase : Union[str, Any] = j - k + 1 # noqa: E741 UpperCamelCase : Tuple = j + k - 1 # update max_length and start position if max_length < length[j]: UpperCamelCase : List[Any] = length[j] UpperCamelCase : Dict = j # create that string UpperCamelCase : List[Any] = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
103
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): def __init__( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=224, SCREAMING_SNAKE_CASE_=30, SCREAMING_SNAKE_CASE_=400, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5], SCREAMING_SNAKE_CASE_=[0.5, 0.5, 0.5], ) -> List[str]: UpperCamelCase : Optional[int] = size if size is not None else {'height': 18, 'width': 18} UpperCamelCase : List[Any] = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : int = num_channels UpperCamelCase : int = image_size UpperCamelCase : List[Any] = min_resolution UpperCamelCase : int = max_resolution UpperCamelCase : Any = do_resize UpperCamelCase : Optional[int] = size UpperCamelCase : List[str] = do_normalize UpperCamelCase : Optional[Any] = image_mean UpperCamelCase : Tuple = image_std def snake_case_ ( self ) -> List[Any]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class lowerCAmelCase_ ( a__ , unittest.TestCase ): UpperCAmelCase__ : Optional[Any] = ViTImageProcessor if is_vision_available() else None def snake_case_ ( self ) -> Any: UpperCamelCase : Dict = EfficientFormerImageProcessorTester(self ) @property def snake_case_ ( self ) -> List[Any]: return self.image_proc_tester.prepare_image_processor_dict() def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Optional[int] = 
self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'image_mean' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'image_std' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_normalize' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'do_resize' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_, 'size' ) ) def snake_case_ ( self ) -> Any: pass def snake_case_ ( self ) -> int: # Initialize image_processor UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase : List[str] = prepare_image_inputs(self.image_proc_tester, equal_resolution=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_, Image.Image ) # Test not batched input UpperCamelCase : str = image_processor(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ), ) # Test batched UpperCamelCase : Optional[Any] = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ), ) def snake_case_ ( self ) -> str: # Initialize image_processor UpperCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester, equal_resolution=SCREAMING_SNAKE_CASE_, numpify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_, np.ndarray ) # Test not batched input UpperCamelCase : Dict = image_processor(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, 
self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ), ) # Test batched UpperCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ), ) def snake_case_ ( self ) -> Tuple: # Initialize image_processor UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase : int = prepare_image_inputs(self.image_proc_tester, equal_resolution=SCREAMING_SNAKE_CASE_, torchify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_, torch.Tensor ) # Test not batched input UpperCamelCase : Optional[int] = image_processor(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ), ) # Test batched UpperCamelCase : int = image_processor(SCREAMING_SNAKE_CASE_, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['height'], self.image_proc_tester.size['width'], ), )
103
1
"""simple docstring""" import os import unittest from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' lowercase_ : List[Any] = PhobertTokenizer lowercase_ : Dict = False def lowerCamelCase_ ( self ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A_ : Tuple = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@'] A_ : Optional[int] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) ) A_ : Dict = ['#version: 0.2', 'l à</w>'] A_ : Optional[int] = {'unk_token': '<unk>'} A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) A_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: for token in vocab_tokens: fp.write(F"""{token} {vocab_tokens[token]}\n""" ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(snake_case_ ) ) def lowerCamelCase_ ( self , **snake_case_ ): """simple docstring""" kwargs.update(self.special_tokens_map ) return PhobertTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def lowerCamelCase_ ( self , snake_case_ ): """simple docstring""" A_ : List[str] = 'Tôi là VinAI Research' A_ : Dict = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>' return input_text, output_text def lowerCamelCase_ ( self ): """simple docstring""" A_ : List[str] = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) A_ : Optional[Any] = 'Tôi là VinAI Research' A_ : Any = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split() A_ : str = tokenizer.tokenize(snake_case_ ) print(snake_case_ ) self.assertListEqual(snake_case_ , snake_case_ ) A_ : int = tokens + [tokenizer.unk_token] A_ : Optional[Any] = [4, 3, 5, 3, 3, 3, 
3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
286
"""simple docstring""" from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration lowerCamelCase_ : Any = HfArgumentParser(InitializationArguments) lowerCamelCase_ : Union[str, Any] = parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization lowerCamelCase_ : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks lowerCamelCase_ : Tuple = { 'vocab_size': len(tokenizer), 'scale_attn_by_inverse_layer_idx': True, 'reorder_and_upcast_attn': True, } # Load model config (GPT-2 large in this case) lowerCamelCase_ : int = AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config lowerCamelCase_ : Any = AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
286
1
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline else: from .pipeline_unclip import UnCLIPPipeline from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline from .text_proj import UnCLIPTextProjModel
363
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ): a : str =KandinskyVaaInpaintPipeline a : int =["""image_embeds""", """negative_image_embeds""", """image""", """mask_image"""] a : str =[ """image_embeds""", """negative_image_embeds""", """image""", """mask_image""", ] a : Optional[int] =[ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] a : Dict =False @property def lowerCamelCase__ ( self ): '''simple docstring''' return 32 @property def lowerCamelCase__ ( self ): '''simple docstring''' return 32 @property def lowerCamelCase__ ( self ): '''simple docstring''' return self.time_input_dim @property def lowerCamelCase__ ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCamelCase__ ( self ): '''simple docstring''' return 1_00 @property def lowerCamelCase__ ( self ): '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase = { """in_channels""": 9, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", 
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __lowerCAmelCase = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE ) return model @property def lowerCamelCase__ ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCamelCase__ ( self ): '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = self.dummy_unet __lowerCAmelCase = self.dummy_movq __lowerCAmelCase = DDIMScheduler( num_train_timesteps=10_00,beta_schedule="""linear""",beta_start=0.0_0085,beta_end=0.012,clip_sample=__SCREAMING_SNAKE_CASE,set_alpha_to_one=__SCREAMING_SNAKE_CASE,steps_offset=1,prediction_type="""epsilon""",thresholding=__SCREAMING_SNAKE_CASE,) __lowerCAmelCase = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ): '''simple docstring''' __lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(seed + 1 ) ).to( __SCREAMING_SNAKE_CASE ) # create init_image __lowerCAmelCase = floats_tensor((1, 3, 64, 
64),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = image.cpu().permute(0,2,3,1 )[0] __lowerCAmelCase = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((2_56, 2_56) ) # create mask __lowerCAmelCase = np.ones((64, 64),dtype=np.floataa ) __lowerCAmelCase = 0 if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = { """image""": init_image, """mask_image""": mask, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 2, """guidance_scale""": 4.0, """output_type""": """np""", } return inputs def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = """cpu""" __lowerCAmelCase = self.get_dummy_components() __lowerCAmelCase = self.pipeline_class(**__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase = output.images __lowerCAmelCase = pipe( **self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ),return_dict=__SCREAMING_SNAKE_CASE,)[0] __lowerCAmelCase = image[0, -3:, -3:, -1] __lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] print(f'image.shape {image.shape}' ) assert image.shape == (1, 64, 64, 3) __lowerCAmelCase = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got 
{image_from_tuple_slice.flatten()}' def lowerCamelCase__ ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def lowerCamelCase__ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self ): '''simple docstring''' __lowerCAmelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" ) __lowerCAmelCase = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __lowerCAmelCase = np.ones((7_68, 7_68),dtype=np.floataa ) __lowerCAmelCase = 0 __lowerCAmelCase = """a hat""" __lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""",torch_dtype=torch.floataa ) pipe_prior.to(__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = KandinskyVaaInpaintPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder-inpaint""",torch_dtype=torch.floataa ) __lowerCAmelCase = pipeline.to(__SCREAMING_SNAKE_CASE ) pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 ) __lowerCAmelCase , __lowerCAmelCase = pipe_prior( __SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=5,negative_prompt="""""",).to_tuple() __lowerCAmelCase = pipeline( image=__SCREAMING_SNAKE_CASE,mask_image=__SCREAMING_SNAKE_CASE,image_embeds=__SCREAMING_SNAKE_CASE,negative_image_embeds=__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=1_00,height=7_68,width=7_68,output_type="""np""",) __lowerCAmelCase = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
46
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowercase__ = logging.get_logger(__name__) lowercase__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} lowercase__ = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": 
"""https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } lowercase__ = { """roberta-base""": 512, """roberta-large""": 512, """roberta-large-mnli""": 512, """distilroberta-base""": 512, """roberta-base-openai-detector""": 512, """roberta-large-openai-detector""": 512, } class __lowerCamelCase ( A__ ): '''simple docstring''' a_ : str = VOCAB_FILES_NAMES a_ : List[str] = PRETRAINED_VOCAB_FILES_MAP a_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ : str = ["""input_ids""", """attention_mask"""] a_ : str = RobertaTokenizer def __init__( self : str , a_ : List[str]=None , a_ : Any=None , a_ : Dict=None , a_ : Optional[int]="replace" , a_ : Optional[int]="<s>" , a_ : Optional[int]="</s>" , a_ : Any="</s>" , a_ : List[str]="<s>" , a_ : Any="<unk>" , a_ : Optional[Any]="<pad>" , a_ : str="<mask>" , a_ : Dict=False , a_ : str=True , **a_ : List[str] , ): super().__init__( a_ , a_ , tokenizer_file=a_ , errors=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , add_prefix_space=a_ , trim_offsets=a_ , **a_ , ) lowerCAmelCase_ : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , a_ ) != add_prefix_space: lowerCAmelCase_ : Any = getattr(a_ , pre_tok_state.pop("type" ) ) lowerCAmelCase_ : List[str] = add_prefix_space lowerCAmelCase_ : Tuple = pre_tok_class(**a_ ) lowerCAmelCase_ : List[str] = add_prefix_space lowerCAmelCase_ : str = "post_processor" lowerCAmelCase_ : 
Optional[int] = getattr(self.backend_tokenizer , a_ , a_ ) if tokenizer_component_instance: lowerCAmelCase_ : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowerCAmelCase_ : Tuple = tuple(state["sep"] ) if "cls" in state: lowerCAmelCase_ : Optional[int] = tuple(state["cls"] ) lowerCAmelCase_ : Optional[int] = False if state.get("add_prefix_space" , a_ ) != add_prefix_space: lowerCAmelCase_ : Tuple = add_prefix_space lowerCAmelCase_ : int = True if state.get("trim_offsets" , a_ ) != trim_offsets: lowerCAmelCase_ : List[str] = trim_offsets lowerCAmelCase_ : Tuple = True if changes_to_apply: lowerCAmelCase_ : Union[str, Any] = getattr(a_ , state.pop("type" ) ) lowerCAmelCase_ : Union[str, Any] = component_class(**a_ ) setattr(self.backend_tokenizer , a_ , a_ ) @property def lowerCamelCase ( self : Union[str, Any] ): if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def lowerCamelCase ( self : Union[str, Any] , a_ : int ): lowerCAmelCase_ : Union[str, Any] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else value lowerCAmelCase_ : Optional[int] = value def lowerCamelCase ( self : Tuple , *a_ : Optional[int] , **a_ : Union[str, Any] ): lowerCAmelCase_ : Tuple = kwargs.get("is_split_into_words" , a_ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*a_ , **a_ ) def lowerCamelCase ( self : int , *a_ : Dict , **a_ : Tuple ): lowerCAmelCase_ : List[str] = kwargs.get("is_split_into_words" , a_ ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*a_ , **a_ ) def lowerCamelCase ( self : Any , a_ : str , a_ : Optional[str] = None ): lowerCAmelCase_ : Optional[Any] = self._tokenizer.model.save(a_ , name=a_ ) return tuple(a_ ) def lowerCamelCase ( self : List[Any] , a_ : Dict , a_ : Tuple=None ): lowerCAmelCase_ : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowerCamelCase ( self : Tuple , a_ : List[int] , a_ : Optional[List[int]] = None ): lowerCAmelCase_ : Any = [self.sep_token_id] lowerCAmelCase_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
241
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowerCamelCase ( A__ , A__ , unittest.TestCase ): '''simple docstring''' a_ : Optional[Any] = IFInpaintingSuperResolutionPipeline a_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""} a_ : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""} ) a_ : int = PipelineTesterMixin.required_optional_params - {"""latents"""} def lowerCamelCase ( self : Optional[Any] ): return self._get_superresolution_dummy_components() def lowerCamelCase ( self : Optional[Any] , a_ : List[str] , a_ : Union[str, Any]=0 ): if str(a_ ).startswith("mps" ): lowerCAmelCase_ : List[Any] = torch.manual_seed(a_ ) else: lowerCAmelCase_ : str = torch.Generator(device=a_ ).manual_seed(a_ ) lowerCAmelCase_ : List[str] = floats_tensor((1, 3, 16, 16) , rng=random.Random(a_ ) ).to(a_ ) lowerCAmelCase_ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ ) lowerCAmelCase_ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ ) lowerCAmelCase_ : Any = { "prompt": "A painting of a squirrel eating a burger", "image": image, "original_image": original_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def lowerCamelCase ( self : List[Any] ): 
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def lowerCamelCase ( self : Optional[int] ): self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def lowerCamelCase ( self : Optional[Any] ): # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def lowerCamelCase ( self : Tuple ): self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def lowerCamelCase ( self : List[str] ): self._test_save_load_local() def lowerCamelCase ( self : Optional[int] ): self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
241
1
'''simple docstring''' import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel _a : Union[str, Any] = HfApi() _a : Tuple = {} # fmt: off _a : List[Any] = torch.tensor([ -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467, 1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189, -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839, 0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557 ]) _a : Union[str, Any] = torch.tensor([ -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436, 1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208, -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948, 2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365 ]) _a : int = torch.tensor([ -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869, -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304, -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925, 0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943 ]) _a : Optional[int] = torch.tensor([ 0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172, -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309, 0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805, -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505 ]) _a : int = torch.tensor([ 0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133, -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395, 0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559, -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386 ]) _a : Optional[int] = torch.tensor([ 0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078, -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330, 0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683, -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431 ]) _a : int = torch.tensor([ 0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 
0.2042, -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398, 0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574, -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390 ]) _a : int = torch.tensor([ 0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042, -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290, 0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746, -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473 ]) _a : List[Any] = torch.tensor([ -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330, 1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243, -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810, 1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251]) _a : Dict = torch.tensor([ -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324, 0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181, -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259, 1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266 ]) _a : Union[str, Any] = torch.tensor([ -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212, 0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027, -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131, 1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355 ]) _a : Union[str, Any] = torch.tensor([ -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959, 1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351, -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341, 3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066 ]) _a : Tuple = torch.tensor([ -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740, 1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398, -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395, 2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243 ]) _a : List[Any] = torch.tensor([ -2.0628, -2.7667, -0.2089, -0.8263, 
2.0539, 0.5992, 0.6495, -3.8336, 1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908, -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560, 3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343 ]) _a : List[str] = torch.tensor([ -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344, 1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391, -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439, 1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219 ]) # fmt: on _a : int = api.list_models(filter="""diffusers""") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": _a : Optional[Any] = "/home/patrick/google_checkpoints/" + mod.modelId.split("""/""")[-1] print(f'Started running {mod.modelId}!!!') if mod.modelId.startswith("""CompVis"""): _a : str = UNetaDModel.from_pretrained(local_checkpoint, subfolder="""unet""") else: _a : Union[str, Any] = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) _a : Optional[int] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) _a : Any = torch.tensor([1_0] * noise.shape[0]) with torch.no_grad(): _a : int = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :3_0], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3 ) print(f'{mod.modelId} has passed successfully!!!')
357
'''FocalNet package init: lazy import structure so heavy torch modules load on demand.'''
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Always-available (config-only) exports.
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes require torch, so they are registered only when torch is present.
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see eager imports; at runtime the lazy module is used instead.
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
46
0
import random

# NOTE: the project-local `bin_exp_mod` helper is no longer needed — the
# builtin three-argument pow() performs modular exponentiation in C.


def is_prime_big(n, prec=1000):
    """Probabilistic (Miller-Rabin-style) primality test.

    Args:
        n: integer to test.
        prec: number of random-base rounds; more rounds lower the error
            probability for composites (primes are never rejected).

    Returns:
        True if ``n`` is (very probably) prime, False if proven composite.
    """
    if n < 2:
        return False
    if n % 2 == 0:
        # 2 is the only even prime.
        return n == 2
    # n is odd here: write n - 1 = d * 2**exp with d odd.
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division — d must remain an int for pow()
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = pow(a, d, n)  # modular exponentiation via builtin pow
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    # a is not a witness for this round.
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                # a proves n composite.
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
90
def method_a(boundary, steps):
    """Approximate the integral of ``f`` over ``boundary`` using the
    extended trapezoidal rule: int(f) ~= h/2 * (f1 + 2*f2 + ... + fn).

    Args:
        boundary: two-element sequence [a, b] giving the integration limits.
        steps: number of sub-intervals (resolution).

    Returns:
        The trapezoidal estimate of the integral as a float.
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)  # endpoints are weighted by h/2 ...
    for i in x_i:
        y += h * f(i)  # ... interior points by h
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a+h, a+2h, ..., b-h.

    A half-step tolerance is used so floating-point accumulation error
    (e.g. 0.9000000000000001 vs 0.9) cannot drop the last interior point.
    """
    x = a + h
    while x < b - h / 2.0:
        yield x
        x = x + h


def f(x):
    """Integrand: f(x) = x**2 (edit this to integrate another function)."""
    return (x - 0) * (x - 0)


def main():
    """Integrate f over [0, 1] with 10 steps and print the result."""
    a = 0.0  # lower bound of integration
    b = 1.0  # upper bound of integration
    steps = 10.0  # number of steps / resolution
    boundary = [a, b]
    y = method_a(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
48
0
"""simple docstring""" from __future__ import annotations def lowercase__ ( lowercase_ ,lowercase_ ) -> bool: """simple docstring""" _UpperCamelCase : Tuple = get_failure_array(lowercase_ ) # 2) Step through text searching for pattern _UpperCamelCase : Tuple = 0, 0 # index into text, pattern while i < len(lowercase_ ): if pattern[j] == text[i]: if j == (len(lowercase_ ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: _UpperCamelCase : Optional[int] = failure[j - 1] continue i += 1 return False def lowercase__ ( lowercase_ ) -> list[int]: """simple docstring""" _UpperCamelCase : Optional[Any] = [0] _UpperCamelCase : Tuple = 0 _UpperCamelCase : str = 1 while j < len(lowercase_ ): if pattern[i] == pattern[j]: i += 1 elif i > 0: _UpperCamelCase : Union[str, Any] = failure[i - 1] continue j += 1 failure.append(lowercase_ ) return failure if __name__ == "__main__": # Test 1) lowerCamelCase__ = "abc1abc12" lowerCamelCase__ = "alskfjaldsabc1abc1abc12k23adsfabcabc" lowerCamelCase__ = "alskfjaldsk23adsfabcabc" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) lowerCamelCase__ = "ABABX" lowerCamelCase__ = "ABABZABABYABABX" assert kmp(pattern, text) # Test 3) lowerCamelCase__ = "AAAB" lowerCamelCase__ = "ABAAAAAB" assert kmp(pattern, text) # Test 4) lowerCamelCase__ = "abcdabcy" lowerCamelCase__ = "abcxabcdabxabcdabcdabcy" assert kmp(pattern, text) # Test 5) lowerCamelCase__ = "aabaabaaa" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
355
"""simple docstring""" import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem lowerCamelCase__ = importlib.util.find_spec("s3fs") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 lowerCamelCase__ = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def lowercase__ ( lowercase_ ) -> str: """simple docstring""" if "://" in dataset_path: _UpperCamelCase : List[Any] = dataset_path.split("://" )[1] return dataset_path def lowercase__ ( lowercase_ ) -> bool: """simple docstring""" if fs is not None and fs.protocol != "file": return True else: return False def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Optional[Any]: """simple docstring""" _UpperCamelCase : List[str] = not is_remote_filesystem(lowercase_ ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(lowercase_ ) ,fs._strip_protocol(lowercase_ ) ) else: fs.mv(lowercase_ ,lowercase_ ,recursive=lowercase_ ) def lowercase__ ( ) -> None: """simple docstring""" if hasattr(fsspec.asyn ,"reset_lock" ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: _UpperCamelCase : Dict = None _UpperCamelCase : str = None _UpperCamelCase : str = threading.Lock()
310
0
import requests
from bs4 import BeautifulSoup  # the actual package name is `bs4`, not `bsa`


def world_covidaa_stats(url="https://www.worldometers.info/coronavirus"):
    """Scrape worldometers and return a {statistic name: value} mapping.

    Args:
        url: page to scrape (defaults to the live COVID-19 counter page).

    Returns:
        Dict mapping each headline (h1) / panel title to its counter text,
        both stripped of surrounding whitespace.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    # Headline keys pair with main counters, panel titles with table counters.
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covidaa_stats().items():
        print(f"{key}\n{value}\n")
123
"""A* and bidirectional A* pathfinding on a small obstacle grid.

FIX: the generated source had three classes all named ``a`` (while the demo
called ``AStar``/``BidirectionalAStar``), duplicate parameter names in several
signatures (a SyntaxError), and module constants bound to throwaway names while
the methods read ``grid``/``delta``/``HEURISTIC``/``TPosition``.
"""
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    """A search node: position, cost-so-far (g), heuristic (h) and f = g + h."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # positions are stored (y, x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan or Euclidean distance to the goal, per HEURISTIC."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Ordering by f-cost lets the open list act as a priority queue.
        return self.f_cost < other.f_cost


class AStar:
    """Classic A* search over the module-level ``grid``."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        # Positions come in as (y, x); Node takes (x, y, ...).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A*; return the path start→goal, or [start] if no path exists."""
        while self.open_nodes:
            # Open nodes are sorted using __lt__ (lowest f-cost first).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return walkable, in-bounds neighbour nodes of *parent*."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Follow parent links back to the start, returning the path in order."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    """Two A* searches run in lockstep from both ends until their frontiers meet."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Each direction now aims for the other frontier's current node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the forward path with the reversed backward path at the meeting node."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the shared meeting node
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    # NOTE(review): the generated demo timed only construction and never
    # invoked .search(); preserved as-is.
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
123
1
"""simple docstring""" from __future__ import annotations import math def lowerCAmelCase (__UpperCamelCase : float , __UpperCamelCase : int ): """simple docstring""" __UpperCamelCase =u for i in range(1 , lowerCamelCase__ ): __UpperCamelCase =temp * (u - i) return temp def lowerCAmelCase (): """simple docstring""" __UpperCamelCase =int(input('''enter the numbers of values: ''' ) ) __UpperCamelCase =[] for _ in range(lowerCamelCase__ ): y.append([] ) for i in range(lowerCamelCase__ ): for j in range(lowerCamelCase__ ): y[i].append(lowerCamelCase__ ) __UpperCamelCase =0 print('''enter the values of parameters in a list: ''' ) __UpperCamelCase =list(map(lowerCamelCase__ , input().split() ) ) print('''enter the values of corresponding parameters: ''' ) for i in range(lowerCamelCase__ ): __UpperCamelCase =float(input() ) __UpperCamelCase =int(input('''enter the value to interpolate: ''' ) ) __UpperCamelCase =(value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , lowerCamelCase__ ): for j in range(n - i ): __UpperCamelCase =y[j + 1][i - 1] - y[j][i - 1] __UpperCamelCase =y[0][0] for i in range(1 , lowerCamelCase__ ): summ += (ucal(lowerCamelCase__ , lowerCamelCase__ ) * y[0][i]) / math.factorial(lowerCamelCase__ ) print(F"""the value at {value} is {summ}""" ) if __name__ == "__main__": main()
354
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def lowerCAmelCase (__UpperCamelCase : int ): """simple docstring""" __UpperCamelCase =FileLock(str(tmpdir / '''foo.lock''' ) ) __UpperCamelCase =FileLock(str(tmpdir / '''foo.lock''' ) ) __UpperCamelCase =0.0_1 with locka.acquire(): with pytest.raises(__UpperCamelCase ): __UpperCamelCase =time.time() locka.acquire(__UpperCamelCase ) assert time.time() - _start > timeout def lowerCAmelCase (__UpperCamelCase : Union[str, Any] ): """simple docstring""" __UpperCamelCase ='''a''' * 1_0_0_0 + '''.lock''' __UpperCamelCase =FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('''.lock''' ) assert not locka._lock_file.endswith(__UpperCamelCase ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 __UpperCamelCase =FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(__UpperCamelCase ): locka.acquire(0 )
85
0
"""BLEU metric for the `datasets` library, wrapping the TensorFlow NMT scorer."""
import datasets

from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py


# FIX: the three docstring constants below were all bound to the same
# generated name while the class read _CITATION/_DESCRIPTION/_KWARGS_DESCRIPTION,
# raising NameError at import time.
_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
    author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
    title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
    booktitle = {},
    year = {2002},
    pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
    title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
    author = "Lin, Chin-Yew and Och, Franz Josef",
    booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
    month = "aug 23{--}aug 27",
    year = "2004",
    address = "Geneva, Switzerland",
    publisher = "COLING",
    url = "https://www.aclweb.org/anthology/C04-1072",
    pages = "501--507",
}
"""

_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been
machine-translated from one natural language to another. Quality is considered to be the correspondence between a
machine's output and that of a human: "the closer a machine translation is to a professional human translation, the
better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation
with human judgements of quality, and remains one of the most popular automated and inexpensive metrics.

Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good
quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the
translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed].

BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the
reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score
of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason,
it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference
translations will increase the BLEU score.
"""

_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
    predictions: list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    'bleu': bleu score,
    'precisions': geometric mean of n-gram precisions,
    'brevity_penalty': brevity penalty,
    'length_ratio': ratio of lengths,
    'translation_length': translation_length,
    'reference_length': reference_length
Examples:

    >>> predictions = [
    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample
    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample
    ... ]
    >>> references = [
    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)
    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)
    ... ]
    >>> bleu = datasets.load_metric("bleu")
    >>> results = bleu.compute(predictions=predictions, references=references)
    >>> print(results["bleu"])
    1.0
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    """BLEU metric.

    FIX: the generated source renamed ``_info``/``_compute`` to ``__A`` (so the
    Metric machinery could not find them), gave ``_compute`` four parameters
    all with the same name (a SyntaxError), and collapsed the score tuple
    unpacking, leaving ``bleu``, ``precisions`` etc. undefined.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }


# Backward-compatible alias for the generated class name.
lowerCamelCase__ = Bleu
5
"""Export a Stable Diffusion VAE decoder checkpoint to ONNX.

FIX: the generated source named both functions ``__UpperCamelCase`` (so the
``__main__`` call to ``convert_models`` raised NameError), duplicated every
parameter name (a SyntaxError), computed the same dtype in both branches of
the fp16 check, lost the ``vae_decoder.forward`` rebind, and read the
nonexistent ``args.fpaa`` attribute.
"""
import argparse
from pathlib import Path

import torch
from packaging import version
from torch.onnx import export

from diffusers import AutoencoderKL

is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Trace *model* with *model_args* and write an ONNX graph to *output_path*."""
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format`
    # arguments in v1.11, so we check the torch version for backwards compatibility.
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Load the VAE from *model_path* and export its decoder to ONNX.

    Raises ValueError when fp16 export is requested without a CUDA device.
    """
    # NOTE(review): both branches were garbled to the same dtype token in the
    # generated source; half precision for fp16 matches the export intent — confirm.
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=14,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")

    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
    print("SD: Done: ONNX")
116
0
"""Task template for extractive question answering."""
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    """Describes the input/label schema and column names for extractive QA.

    FIX: the generated source bound every attribute to the same name (only the
    last assignment survived, so ``column_mapping`` raised AttributeError),
    garbled the ``frozen`` flag and base class, and dropped the ``@property``
    on ``column_mapping``.
    """

    # `task` is serialized even when it still holds its default value.
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the configured column names onto the canonical QA column names."""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
366
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers

# FIX: "3" was previously assigned to a throwaway variable; the intent
# (presumably — the `os` import was otherwise unused) is to silence
# TensorFlow's C++ logging before `tensorflow` is imported below. Confirm.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
41
0
"""Hyperbolic tangent implemented with NumPy: tanh(x) = 2 / (1 + e^(-2x)) - 1."""
import numpy as np


def a_(_lowerCAmelCase: np.ndarray) -> np.ndarray:
    """Apply the element-wise hyperbolic tangent to the input array.

    FIX: the body previously referenced an undefined name ``vector`` instead
    of the parameter, raising NameError on every call.

    >>> float(a_(np.array([0.0]))[0])
    0.0
    """
    return (2 / (1 + np.exp(-2 * _lowerCAmelCase))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
208
"""Decide whether three 3-D points are collinear via the cross product of the
two vectors they span."""

Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]


def create_vector(end_point1: Pointad, end_point2: Pointad) -> Vectorad:
    """Return the vector from *end_point1* to *end_point2*."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_ad_vectors_cross(ab: Vectorad, ac: Vectorad) -> Vectorad:
    """Return the cross product ab × ac."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vectorad, accuracy: int) -> bool:
    """True when every component of *vector* rounds to zero at *accuracy* decimals."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def a_(a: Pointad, b: Pointad, c: Pointad, accuracy: int = 10) -> bool:
    """Return True when the points a, b and c lie on one line.

    FIX: all four functions here were previously bound to the same generated
    name, so the helper calls below raised NameError; several signatures also
    duplicated parameter names (a SyntaxError) and ``round`` was called with
    the wrong arguments.
    """
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
208
1
"""Tests for ClapProcessor: save/load round-trips and delegation to its
tokenizer and feature extractor."""
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    """FIX: every method was previously named ``a_``, so only the last
    definition survived, ``setUp``/``tearDown`` never ran, and unittest
    discovered no tests; setUp also stored its values in locals instead of
    the ``self.checkpoint``/``self.tmpdirname`` attributes the other methods read.
    """

    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        # from_pretrained yields the fast tokenizer variant.
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 10_00))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
357
"""simple docstring""" def a__ ( __lowercase=2_8123 ) -> List[Any]: _A = [1] * (limit + 1) for i in range(2 , int(limit**0.5 ) + 1 ): sum_divs[i * i] += i for k in range(i + 1 , limit // i + 1 ): sum_divs[k * i] += k + i _A = set() _A = 0 for n in range(1 , limit + 1 ): if sum_divs[n] > n: abundants.add(__lowercase ) if not any((n - a in abundants) for a in abundants ): res += n return res if __name__ == "__main__": print(solution())
163
0
"""simple docstring""" import mpmath # for roots of unity import numpy as np class lowerCAmelCase__ : '''simple docstring''' def __init__( self , lowercase=None , lowercase=None ): # Input as list _lowerCamelCase : Optional[int] = list(poly_a or [0] )[:] _lowerCamelCase : Tuple = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _lowerCamelCase : Tuple = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() _lowerCamelCase : Optional[Any] = len(self.polyB ) # Add 0 to make lengths equal a power of 2 _lowerCamelCase : List[str] = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform _lowerCamelCase : List[str] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product _lowerCamelCase : Optional[int] = self.__multiply() def A_ ( self , lowercase ): _lowerCamelCase : Any = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB] # Corner case if len(lowercase ) <= 1: return dft[0] # _lowerCamelCase : Tuple = self.c_max_length // 2 while next_ncol > 0: _lowerCamelCase : Optional[Any] = [[] for i in range(lowercase )] _lowerCamelCase : List[Any] = self.root**next_ncol # First half of next step _lowerCamelCase : Optional[int] = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(lowercase ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step _lowerCamelCase : List[Any] = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(lowercase ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update _lowerCamelCase : Optional[int] = new_dft _lowerCamelCase : int = next_ncol // 2 return dft[0] def A_ ( self ): _lowerCamelCase : Optional[Any] = 
self.__dft('A' ) _lowerCamelCase : Optional[Any] = self.__dft('B' ) _lowerCamelCase : Union[str, Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT _lowerCamelCase : Any = 2 while next_ncol <= self.c_max_length: _lowerCamelCase : Union[str, Any] = [[] for i in range(lowercase )] _lowerCamelCase : Tuple = self.root ** (next_ncol // 2) _lowerCamelCase : str = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update _lowerCamelCase : Union[str, Any] = new_inverse_c next_ncol *= 2 # Unpack _lowerCamelCase : Dict = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self ): _lowerCamelCase : Optional[int] = 'A = ' + ' + '.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) _lowerCamelCase : Any = 'B = ' + ' + '.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) _lowerCamelCase : Optional[Any] = 'A*B = ' + ' + '.join( F'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return F'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
96
from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, 
wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
68
0
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


# The warnings in `from_pretrained` reference this name, so it must be `logger`.
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}


class PixaStructTextConfig(PretrainedConfig):
    """Configuration for the Pix2Struct text (decoder) model.

    Stores the decoder hyper-parameters; instantiating with no arguments
    yields the defaults of the base `google/pix2struct-textcaps-base` decoder.
    """

    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50_244,
        hidden_size=768,
        d_kv=64,
        d_ff=2_048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        # Store hyper-parameters on the instance (the broken version assigned
        # them all to a single throwaway local, so the config carried nothing).
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache

        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id

        # for backwards compatibility
        self.dense_act_fn = dense_act_fn

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # Get the text config dict if we are loading from a composite Pix2StructConfig.
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructVisionConfig(PretrainedConfig):
    """Configuration for the Pix2Struct vision (image encoder) model."""

    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2_048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4_096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # Get the vision config dict if we are loading from a composite Pix2StructConfig.
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class PixaStructConfig(PretrainedConfig):
    """Composite configuration bundling a text and a vision sub-config."""

    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")

        self.text_config = PixaStructTextConfig(**text_config)
        self.vision_config = PixaStructVisionConfig(**vision_config)

        # Mirror the decoder's special-token ids at the top level.
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id

        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range

        # Propagate the shared initializer range into both sub-configs.
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range

        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from already-instantiated sub-configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
355
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    # Simple handler with one field of each kind to exercise `to_kwargs`.
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict;
        # otherwise only the overridden fields appear.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1_024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2_000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-launch this file under torchrun so the `__main__` block below
        # runs once per GPU and checks the DDP kwargs end to end.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
33
0
"""Public helpers re-exported by ``datasets.utils``."""
# flake8: noqa
# Lint as: python3

# Explicit public API of this sub-package (the broken version assigned the
# export list to a placeholder name instead of `__all__`).
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
55
"""simple docstring""" import socket def _snake_case ( ): _lowerCamelCase : List[Any] = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) _lowerCamelCase : Union[str, Any] = socket.gethostname() _lowerCamelCase : List[Any] = 12312 sock.connect((host, port) ) sock.send(B'Hello server!' ) with open('Received_file' , 'wb' ) as out_file: print('File opened' ) print('Receiving data...' ) while True: _lowerCamelCase : int = sock.recv(1024 ) if not data: break out_file.write(lowercase__ ) print('Successfully received the file' ) sock.close() print('Connection closed' ) if __name__ == "__main__": main()
96
0
"""Configuration for the BertAbs abstractive summarization model."""
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    """Hyper-parameters for the BertAbs encoder/decoder summarizer.

    `enc_*` fields describe the BERT-based encoder, `dec_*` fields the
    Transformer decoder.
    """

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Store every hyper-parameter on the instance (the broken version
        # assigned them all to one throwaway local).
        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
123
"""Integer factorization with Pollard's rho algorithm."""
from math import gcd


def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3):
    """Return a nontrivial divisor of ``num``, or ``None`` if none was found.

    Uses Pollard's rho with Floyd cycle detection. The search is deterministic
    for fixed ``seed``/``step``, and ``None`` is returned after ``attempts``
    failed retries (e.g. when ``num`` is prime).

    Raises:
        ValueError: if ``num`` is less than 2.
    """
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Pollard's rho struggles to find factors divisible by two, so even
    # inputs are handled explicitly.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pseudorandom step function f(x) = (x**2 + C) % num.  ``C`` (the ``step``
    # argument) is varied between attempts to improve the odds of success.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # Floyd cycle detection: both walkers start at the seed; the tortoise
        # moves one step per iteration while the hare moves two.
        tortoise = seed
        hare = seed

        while True:
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # Once both walkers enter a cycle whose length divides ``num``,
            # their difference shares a common factor with ``num``.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet; keep searching.
                continue
            if divisor == num:
                # Trivial divisor; restart with new parameters.
                break
            # Found a nontrivial factor of ``num``.
            return divisor

        # Retry deterministically: reseed from the hare's position (as in
        # Brent's optimized variant) and perturb the random function.
        seed = hare
        step += 1

    # Unlucky, or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
123
1
from string import ascii_lowercase, ascii_uppercase


def lowerCAmelCase__(sentence: str) -> str:
    """Capitalize the first character of ``sentence`` if it is a lowercase ASCII letter.

    A non-letter or already-uppercase first character is left unchanged; an
    empty string yields an empty string.
    """
    if not sentence:
        return ""
    # Map each lowercase ASCII letter to its uppercase counterpart.  The broken
    # version zipped the sentence with itself, which mapped every character to
    # itself and made the function a no-op.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
212
import json
import os
import unittest

from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors


@require_tokenizers
class MvpTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the slow and fast MVP tokenizers (BPE, Roberta-style vocab)."""

    # Attribute names required by TokenizerTesterMixin.
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        # Minimal BPE vocab/merges covering the "lower newer" test sentence.
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1_024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        # MVP's tokenizer does not support pretokenized inputs.
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
212
1
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    """Configuration for Perceiver models (text, image, flow, multimodal)."""

    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1_280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2_048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1_920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Core latent-transformer hyper-parameters (the broken version wrote
        # every value to one throwaway local instead of `self`).
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    """ONNX export configuration for Perceiver."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            # Perceiver expects the text tensor under the key "inputs".
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
307
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position

__version__ = "2.13.1"

import platform

import pyarrow
from packaging import version


if version.parse(platform.python_version()) < version.parse("3.7"):
    raise ImportWarning(
        "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."
    )

if version.parse(pyarrow.__version__).major < 8:
    raise ImportWarning(
        "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"
        "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."
    )

del platform
del pyarrow
del version

from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging

# deprecated modules
from datasets import arrow_dataset as _arrow_dataset  # isort:skip
from datasets import utils as _utils  # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager  # isort:skip

# Re-export names onto the deprecated modules for backward compatibility
# (the broken version assigned all of these to a single placeholder name).
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager
307
1
'''simple docstring'''
# Test module for the RoBERTa slow/fast tokenizers.
# NOTE(review): the identifiers in this file were machine-renamed — every
# method is called `UpperCamelCase_`, every local is `SCREAMING_SNAKE_CASE`,
# and many names referenced below (`__lowerCamelCase`, `tokenizer`, `kwargs`,
# `input_text`, `tokens`, `encoded`, ...) are never bound in the visible
# scope. The code is preserved byte-for-byte; only comments were added.
import itertools
import json
import os
import unittest

from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    '''simple docstring'''

    # Tokenizer classes under test plus shared special-token overrides.
    A : Any = RobertaTokenizer
    A : List[str] = RobertaTokenizerFast
    A : Tuple = True
    A : List[Any] = {'''cls_token''': '''<s>'''}

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        # Writes a tiny BPE vocab + merges file into the test tmpdir.
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        SCREAMING_SNAKE_CASE : int = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        SCREAMING_SNAKE_CASE : Any = dict(zip(__lowerCamelCase, range(len(__lowerCamelCase ) ) ) )
        SCREAMING_SNAKE_CASE : List[str] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        SCREAMING_SNAKE_CASE : Optional[Any] = {'''unk_token''': '''<unk>'''}
        SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
        SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
            fp.write(json.dumps(__lowerCamelCase ) + '\n' )
        with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
            fp.write('\n'.join(__lowerCamelCase ) )

    def UpperCamelCase_ ( self, **A ):
        '''simple docstring'''
        # Factory for the slow (Python) tokenizer built from the tmpdir fixture.
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **__lowerCamelCase )

    def UpperCamelCase_ ( self, **A ):
        '''simple docstring'''
        # Factory for the fast (Rust) tokenizer built from the tmpdir fixture.
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **__lowerCamelCase )

    def UpperCamelCase_ ( self, A ):
        '''simple docstring'''
        # Returns an (input, expected output) text pair for round-trip checks.
        SCREAMING_SNAKE_CASE : int = '''lower newer'''
        SCREAMING_SNAKE_CASE : str = '''lower newer'''
        return input_text, output_text

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        # Checks tokenize() and convert_tokens_to_ids() against the tiny BPE vocab.
        SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map )
        SCREAMING_SNAKE_CASE : Optional[Any] = '''lower newer'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        SCREAMING_SNAKE_CASE : str = tokenizer.tokenize(__lowerCamelCase )  # , add_prefix_space=True)
        self.assertListEqual(__lowerCamelCase, __lowerCamelCase )

        SCREAMING_SNAKE_CASE : List[Any] = tokens + [tokenizer.unk_token]
        SCREAMING_SNAKE_CASE : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ), __lowerCamelCase )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        # Pins exact token ids for two reference sentences (incl. non-ASCII).
        SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode('Hello world!', add_special_tokens=__lowerCamelCase ), [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418', add_special_tokens=__lowerCamelCase ),
            [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2],
        )

    @slow
    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        # build_inputs_with_special_tokens must match encode(add_special_tokens=...)
        # for single sequences and pairs (needs the real roberta-base checkpoint).
        SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class.from_pretrained('roberta-base' )

        SCREAMING_SNAKE_CASE : Any = tokenizer.encode('sequence builders', add_special_tokens=__lowerCamelCase )
        SCREAMING_SNAKE_CASE : int = tokenizer.encode('multi-sequence build', add_special_tokens=__lowerCamelCase )

        SCREAMING_SNAKE_CASE : str = tokenizer.encode(
            'sequence builders', add_special_tokens=__lowerCamelCase, add_prefix_space=__lowerCamelCase )
        SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=__lowerCamelCase, add_prefix_space=__lowerCamelCase )

        SCREAMING_SNAKE_CASE : Dict = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
        SCREAMING_SNAKE_CASE : Tuple = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase, __lowerCamelCase )

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        # Exercises add_prefix_space and special-token spacing (<mask> keeps its
        # left space via AddedToken(lstrip=...)).
        SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()

        SCREAMING_SNAKE_CASE : Optional[int] = '''Encode this sequence.'''
        SCREAMING_SNAKE_CASE : List[Any] = tokenizer.byte_encoder[''' '''.encode('utf-8' )[0]]

        # Testing encoder arguments
        SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase, add_prefix_space=__lowerCamelCase )
        SCREAMING_SNAKE_CASE : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(__lowerCamelCase, __lowerCamelCase )

        SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase, add_prefix_space=__lowerCamelCase )
        SCREAMING_SNAKE_CASE : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(__lowerCamelCase, __lowerCamelCase )

        tokenizer.add_special_tokens({'bos_token': '<s>'} )
        SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(__lowerCamelCase, add_special_tokens=__lowerCamelCase )
        SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(__lowerCamelCase, __lowerCamelCase )

        # Testing spaces after special tokens
        SCREAMING_SNAKE_CASE : Dict = '''<mask>'''
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(__lowerCamelCase, lstrip=__lowerCamelCase, rstrip=__lowerCamelCase )} )  # mask token has a left space
        SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_ids(__lowerCamelCase )

        SCREAMING_SNAKE_CASE : Optional[int] = '''Encode <mask> sequence'''
        SCREAMING_SNAKE_CASE : int = '''Encode <mask>sequence'''

        SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(__lowerCamelCase )
        SCREAMING_SNAKE_CASE : str = encoded.index(__lowerCamelCase )
        SCREAMING_SNAKE_CASE : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(__lowerCamelCase, __lowerCamelCase )

        SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.encode(__lowerCamelCase )
        SCREAMING_SNAKE_CASE : Union[str, Any] = encoded.index(__lowerCamelCase )
        SCREAMING_SNAKE_CASE : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(__lowerCamelCase, __lowerCamelCase )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        # Intentionally skipped in this subclass.
        pass

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        # Compares the Rust and Python tokenizers on a sentence containing
        # <mask>; both must agree on ids, masks, and decoded tokens.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase, **__lowerCamelCase )
                SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class.from_pretrained(__lowerCamelCase, **__lowerCamelCase )
                SCREAMING_SNAKE_CASE : int = '''A, <mask> AllenNLP sentence.'''
                SCREAMING_SNAKE_CASE : int = tokenizer_r.encode_plus(__lowerCamelCase, add_special_tokens=__lowerCamelCase, return_token_type_ids=__lowerCamelCase )
                SCREAMING_SNAKE_CASE : Tuple = tokenizer_p.encode_plus(__lowerCamelCase, add_special_tokens=__lowerCamelCase, return_token_type_ids=__lowerCamelCase )

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ), sum(tokens_p['token_type_ids'] ) )

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ),
                    sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ),
                )

                SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )

                self.assertSequenceEqual(
                    __lowerCamelCase, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    __lowerCamelCase, ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        # add_prefix_space / trim_offsets must round-trip through the fast
        # tokenizer's serialized pre_tokenizer and post_processor state.
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
            SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=__lowerCamelCase, add_prefix_space=__lowerCamelCase, trim_offsets=__lowerCamelCase )

            SCREAMING_SNAKE_CASE : Dict = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            SCREAMING_SNAKE_CASE : Dict = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )

            self.assertEqual(pre_tokenizer_state['add_prefix_space'], __lowerCamelCase )

            self.assertEqual(post_processor_state['add_prefix_space'], __lowerCamelCase )
            self.assertEqual(post_processor_state['trim_offsets'], __lowerCamelCase )

    def UpperCamelCase_ ( self ):
        '''simple docstring'''
        # Checks offset mappings for every (add_prefix_space, trim_offsets)
        # combination, with and without a leading space in the input text.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                SCREAMING_SNAKE_CASE : Optional[Any] = '''hello'''  # `hello` is a token in the vocabulary of `pretrained_name`
                SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}"

                SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                    __lowerCamelCase, use_fast=__lowerCamelCase, add_prefix_space=__lowerCamelCase, trim_offsets=__lowerCamelCase )
                SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r(__lowerCamelCase, return_offsets_mapping=__lowerCamelCase, add_special_tokens=__lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0], (0, len(__lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )),
                )

                SCREAMING_SNAKE_CASE : List[Any] = self.rust_tokenizer_class.from_pretrained(
                    __lowerCamelCase, use_fast=__lowerCamelCase, add_prefix_space=__lowerCamelCase, trim_offsets=__lowerCamelCase )
                SCREAMING_SNAKE_CASE : Dict = tokenizer_r(__lowerCamelCase, return_offsets_mapping=__lowerCamelCase, add_special_tokens=__lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0], (0, len(__lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )),
                )

                SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(
                    __lowerCamelCase, use_fast=__lowerCamelCase, add_prefix_space=__lowerCamelCase, trim_offsets=__lowerCamelCase )
                SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r(__lowerCamelCase, return_offsets_mapping=__lowerCamelCase, add_special_tokens=__lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0], (0, len(__lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )),
                )

                SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(
                    __lowerCamelCase, use_fast=__lowerCamelCase, add_prefix_space=__lowerCamelCase, trim_offsets=__lowerCamelCase )
                SCREAMING_SNAKE_CASE : List[str] = tokenizer_r(__lowerCamelCase, return_offsets_mapping=__lowerCamelCase, add_special_tokens=__lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0], (0, len(__lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )),
                )

                SCREAMING_SNAKE_CASE : Tuple = F" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                    __lowerCamelCase, use_fast=__lowerCamelCase, add_prefix_space=__lowerCamelCase, trim_offsets=__lowerCamelCase )
                SCREAMING_SNAKE_CASE : Dict = tokenizer_r(__lowerCamelCase, return_offsets_mapping=__lowerCamelCase, add_special_tokens=__lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(__lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )),
                )

                SCREAMING_SNAKE_CASE : Dict = self.rust_tokenizer_class.from_pretrained(
                    __lowerCamelCase, use_fast=__lowerCamelCase, add_prefix_space=__lowerCamelCase, trim_offsets=__lowerCamelCase )
                SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(__lowerCamelCase, return_offsets_mapping=__lowerCamelCase, add_special_tokens=__lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(__lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )),
                )

                SCREAMING_SNAKE_CASE : str = self.rust_tokenizer_class.from_pretrained(
                    __lowerCamelCase, use_fast=__lowerCamelCase, add_prefix_space=__lowerCamelCase, trim_offsets=__lowerCamelCase )
                SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r(__lowerCamelCase, return_offsets_mapping=__lowerCamelCase, add_special_tokens=__lowerCamelCase )
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(__lowerCamelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )),
                )
251
"""simple docstring""" def __lowercase ( snake_case_ : int ) ->int: '''simple docstring''' assert ( isinstance(snake_case_ ,snake_case_ ) and number_of_steps > 0 ), F"""number_of_steps needs to be positive integer, your input {number_of_steps}""" if number_of_steps == 1: return 1 __A , __A : List[Any] = 1, 1 for _ in range(number_of_steps - 1 ): __A , __A : List[str] = current + previous, current return current if __name__ == "__main__": import doctest doctest.testmod()
179
0
from __future__ import annotations


def UpperCamelCase(a_list: list[int], item: int) -> bool:
    """Recursive binary search over a sorted list.

    Args:
        a_list: list of integers sorted in ascending order.
        item: value to look for.

    Returns:
        True if ``item`` occurs in ``a_list``, else False (empty list -> False).
    """
    # Bug fixes vs. original: both parameters shared the name
    # `__magic_name__` (a SyntaxError), the body read undefined names
    # (`a_list`, `midpoint`, `item`), and the recursion targeted a
    # nonexistent function `binary_search`.
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        # Search the left half (item is smaller than the pivot).
        return UpperCamelCase(a_list[:midpoint], item)
    # Search the right half.
    return UpperCamelCase(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    # Fixed the driver to bind the names it actually uses.
    user_input = input('Enter numbers separated by comma:\n').strip()
    sequence = [int(item.strip()) for item in user_input.split(',')]
    target = int(input('Enter the number to be found in the list:\n').strip())
    not_str = '' if UpperCamelCase(sequence, target) else 'not '
    print(F'{target} was {not_str}found in {sequence}')
358
# Stable-Diffusion text-to-image pipeline variant that reuses a reference
# latent tensor so that the same seed produces similar images at different
# output sizes.
# NOTE(review): identifiers were machine-renamed — every parameter is
# `_UpperCAmelCase` and every local is `lowercase__`, so many names read
# below (`slice_size`, `text_inputs`, `batch_size`, `latents`, ...) are never
# bound in the visible scope. Code preserved byte-for-byte; comments only.
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging


A : str = logging.get_logger(__name__)  # pylint: disable=invalid-name


class A ( UpperCAmelCase__ ):
    '''simple docstring'''

    def __init__(self : List[str] , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : CLIPTextModel , _UpperCAmelCase : CLIPTokenizer , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _UpperCAmelCase : StableDiffusionSafetyChecker , _UpperCAmelCase : CLIPImageProcessor , ) -> Dict:
        """simple docstring"""
        # Registers vae / text_encoder / tokenizer / unet / scheduler /
        # safety_checker / feature_extractor as pipeline modules.
        super().__init__()
        self.register_modules(
            vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , )

    def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : Optional[Union[str, int]] = "auto" ) -> Tuple:
        """simple docstring"""
        # Enables sliced attention on the UNet; "auto" halves the head dim.
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowercase__ = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(_UpperCAmelCase )

    def lowerCamelCase__ (self : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        # Disables attention slicing (delegates with a slice size of None
        # upstream; the argument name is mangled here).
        self.enable_attention_slicing(_UpperCAmelCase )

    @torch.no_grad()
    def __call__(self : Any , _UpperCAmelCase : Union[str, List[str]] , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 50 , _UpperCAmelCase : float = 7.5 , _UpperCAmelCase : Optional[Union[str, List[str]]] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : Optional[torch.Generator] = None , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[torch.FloatTensor] = None , **_UpperCAmelCase : Any , ) -> Tuple:
        """simple docstring"""
        # --- input validation: prompt type, height/width divisible by 8,
        # callback_steps a positive int ---
        if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            lowercase__ = 1
        elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            lowercase__ = len(_UpperCAmelCase )
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(_UpperCAmelCase )}''' )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(_UpperCAmelCase )}.''' )

        # get prompt text embeddings
        lowercase__ = self.tokenizer(
            _UpperCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        lowercase__ = text_inputs.input_ids

        # Warn and truncate prompts longer than CLIP's max sequence length.
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            lowercase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        lowercase__ , lowercase__ , lowercase__ = text_embeddings.shape
        lowercase__ = text_embeddings.repeat(1 , _UpperCAmelCase , 1 )
        lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt , _UpperCAmelCase , -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        lowercase__ = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            lowercase__ = 42
            if negative_prompt is None:
                lowercase__ = [""""""]
            elif type(_UpperCAmelCase ) is not type(_UpperCAmelCase ):
                raise TypeError(
                    f'''`negative_prompt` should be the same type to `prompt`, but got {type(_UpperCAmelCase )} !='''
                    f''' {type(_UpperCAmelCase )}.''' )
            elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
                lowercase__ = [negative_prompt]
            elif batch_size != len(_UpperCAmelCase ):
                raise ValueError(
                    f'''`negative_prompt`: {negative_prompt} has batch size {len(_UpperCAmelCase )}, but `prompt`:'''
                    f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    """ the batch size of `prompt`.""" )
            else:
                lowercase__ = negative_prompt

            lowercase__ = text_input_ids.shape[-1]
            lowercase__ = self.tokenizer(
                _UpperCAmelCase , padding="""max_length""" , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors="""pt""" , )
            lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            lowercase__ = uncond_embeddings.shape[1]
            lowercase__ = uncond_embeddings.repeat(_UpperCAmelCase , _UpperCAmelCase , 1 )
            lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , _UpperCAmelCase , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        lowercase__ = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                lowercase__ = torch.randn(
                    _UpperCAmelCase , generator=_UpperCAmelCase , device="""cpu""" , dtype=_UpperCAmelCase ).to(self.device )
                lowercase__ = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device="""cpu""" , dtype=_UpperCAmelCase ).to(
                    self.device )
            else:
                lowercase__ = torch.randn(
                    _UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
                lowercase__ = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            lowercase__ = latents_reference.to(self.device )
            lowercase__ = latents.to(self.device )

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        # (center-crop/offset the reference latents into the target latents).
        lowercase__ = (latents_shape[3] - latents_shape_reference[3]) // 2
        lowercase__ = (latents_shape[2] - latents_shape_reference[2]) // 2
        lowercase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        lowercase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        lowercase__ = 0 if dx < 0 else dx
        lowercase__ = 0 if dy < 0 else dy
        lowercase__ = max(-dx , 0 )
        lowercase__ = max(-dy , 0 )
        # import pdb
        # pdb.set_trace()
        lowercase__ = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(_UpperCAmelCase )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        lowercase__ = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        lowercase__ = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        lowercase__ = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        lowercase__ = {}
        if accepts_eta:
            lowercase__ = eta

        # --- denoising loop ---
        for i, t in enumerate(self.progress_bar(_UpperCAmelCase ) ):
            # expand the latents if we are doing classifier free guidance
            lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            lowercase__ = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )

            # predict the noise residual
            lowercase__ = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample

            # perform guidance
            if do_classifier_free_guidance:
                lowercase__ , lowercase__ = noise_pred.chunk(2 )
                lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            lowercase__ = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

        # Decode latents to image space (1/0.18215 is the SD VAE scaling factor).
        lowercase__ = 1 / 0.18_215 * latents
        lowercase__ = self.vae.decode(_UpperCAmelCase ).sample

        lowercase__ = (image / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        # Optional NSFW screening via the safety checker.
        if self.safety_checker is not None:
            lowercase__ = self.feature_extractor(self.numpy_to_pil(_UpperCAmelCase ) , return_tensors="""pt""" ).to(
                self.device )
            lowercase__ , lowercase__ = self.safety_checker(
                images=_UpperCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            lowercase__ = None

        if output_type == "pil":
            lowercase__ = self.numpy_to_pil(_UpperCAmelCase )

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase )
146
0
"""simple docstring""" import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging UpperCAmelCase__ = logging.get_logger(__name__) def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str]=False ) -> List[str]: try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: _snake_case = os.path.abspath(_lowerCAmelCase ) logger.info(f'''Loading PyTorch weights from {pt_path}''' ) _snake_case = torch.load(_lowerCAmelCase , map_location='''cpu''' ) logger.info(f'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) _snake_case = convert_pytorch_state_dict_to_flax(_lowerCAmelCase , _lowerCAmelCase ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files _snake_case = convert_pytorch_sharded_state_dict_to_flax(_lowerCAmelCase , _lowerCAmelCase ) return flax_state_dict def _UpperCAmelCase ( __lowerCamelCase : Tuple[str] , __lowerCamelCase : np.ndarray , __lowerCamelCase : Dict[str, jnp.ndarray] , __lowerCamelCase : str , ) -> (Tuple[str], np.ndarray): def is_key_or_prefix_key_in_dict(__lowerCamelCase : Tuple[str] ) -> bool: return len(set(_lowerCAmelCase ) & {key, (model_prefix,) + key} ) > 0 # layer norm _snake_case = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_lowerCAmelCase ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean _snake_case = pt_tuple_key[:-1] + 
('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var _snake_case = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ): return renamed_pt_tuple_key, pt_tensor # embedding _snake_case = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_lowerCAmelCase ): return renamed_pt_tuple_key, pt_tensor # conv layer _snake_case = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ): _snake_case = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer _snake_case = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_lowerCAmelCase ): _snake_case = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight _snake_case = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias _snake_case = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 _snake_case = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): _snake_case = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): _snake_case = pt_tuple_key[-2] + '''_v''' if name is not None: _snake_case = pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ) -> Optional[Any]: # convert pytorch tensor to numpy _snake_case = {k: v.numpy() for k, v in pt_state_dict.items()} _snake_case = 
flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: _snake_case = flax_model.params['''params'''] else: _snake_case = flax_model.params _snake_case = flatten_dict(_lowerCAmelCase ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: _snake_case = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(_lowerCAmelCase ) _snake_case = {} _snake_case = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) _snake_case = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): _snake_case = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary _snake_case = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: _snake_case = pt_tuple_key[1:] # Correctly rename weight parameters _snake_case = rename_key_and_reshape_tensor( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # add model prefix if necessary _snake_case = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: _snake_case = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: _snake_case = jnp.asarray(_lowerCAmelCase ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(_lowerCAmelCase , _lowerCAmelCase ) continue # also add unexpected weight so that warning is thrown _snake_case = jnp.asarray(_lowerCAmelCase ) else: # also add unexpected weight so that warning is thrown _snake_case = jnp.asarray(_lowerCAmelCase ) return unflatten_dict(_lowerCAmelCase ) def _UpperCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Any ) -> str: import torch # Load the index _snake_case = {} for shard_file in shard_filenames: # load using msgpack utils _snake_case = torch.load(_lowerCAmelCase ) _snake_case = {k: v.numpy() for k, v in pt_state_dict.items()} _snake_case = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: _snake_case = flax_model.params['''params'''] _snake_case = flatten_dict(_lowerCAmelCase ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: _snake_case = flax_model.params _snake_case = flatten_dict(_lowerCAmelCase ) _snake_case = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) _snake_case = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): _snake_case = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary _snake_case = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model 
and has_base_model_prefix: _snake_case = pt_tuple_key[1:] # Correctly rename weight parameters _snake_case = rename_key_and_reshape_tensor( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # add model prefix if necessary _snake_case = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: _snake_case = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: _snake_case = jnp.asarray(_lowerCAmelCase ) continue if "var" in flax_key[-1]: _snake_case = jnp.asarray(_lowerCAmelCase ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(_lowerCAmelCase , _lowerCAmelCase ) continue # also add unexpected weight so that warning is thrown _snake_case = jnp.asarray(_lowerCAmelCase ) else: # also add unexpected weight so that warning is thrown _snake_case = jnp.asarray(_lowerCAmelCase ) return unflatten_dict(_lowerCAmelCase ) def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : List[Any] ) -> Tuple: _snake_case = os.path.abspath(_lowerCAmelCase ) logger.info(f'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class _snake_case = getattr(_lowerCAmelCase , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(_lowerCAmelCase , '''rb''' ) as state_f: try: _snake_case = from_bytes(_lowerCAmelCase , state_f.read() ) except UnpicklingError: raise EnvironmentError(f'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. 
''' ) return load_flax_weights_in_pytorch_model(_lowerCAmelCase , _lowerCAmelCase ) def _UpperCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : str ) -> Any: try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights _snake_case = flatten_dict(jax.tree_util.tree_map(lambda __lowerCamelCase : x.dtype == jnp.bfloataa , _lowerCAmelCase ) ).values() if any(_lowerCAmelCase ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) _snake_case = jax.tree_util.tree_map( lambda __lowerCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _lowerCAmelCase ) _snake_case = flatten_dict(_lowerCAmelCase ) _snake_case = pt_model.state_dict() _snake_case = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) _snake_case = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys _snake_case = [] _snake_case = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): _snake_case = flax_key_tuple[0] == pt_model.base_model_prefix _snake_case = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: _snake_case = flax_key_tuple[1:] elif 
load_base_model_into_model_with_head and require_base_model_prefix: _snake_case = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_lowerCAmelCase ) not in pt_model_dict: # conv layer _snake_case = flax_key_tuple[:-1] + ('''weight''',) _snake_case = jnp.transpose(_lowerCAmelCase , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ) not in pt_model_dict: # linear layer _snake_case = flax_key_tuple[:-1] + ('''weight''',) _snake_case = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _snake_case = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: _snake_case = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: _snake_case = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: _snake_case = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: _snake_case = '''.'''.join(_lowerCAmelCase ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. _snake_case = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: _snake_case = key.split('''.''' ) _snake_case = None if key_components[-3::2] == ["parametrizations", "original0"]: _snake_case = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: _snake_case = key_components[-2] + '''_v''' if name is not None: _snake_case = key_components[:-3] + [name] _snake_case = '''.'''.join(_lowerCAmelCase ) _snake_case = key if flax_key in special_pt_names: _snake_case = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'''Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected ''' f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict _snake_case = np.asarray(_lowerCAmelCase ) if not isinstance(_lowerCAmelCase , np.ndarray ) else flax_tensor _snake_case = torch.from_numpy(_lowerCAmelCase ) # remove from missing keys missing_keys.remove(_lowerCAmelCase ) else: # weight is not expected by PyTorch model unexpected_keys.append(_lowerCAmelCase ) pt_model.load_state_dict(_lowerCAmelCase ) # re-transform missing_keys to list _snake_case = list(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' ''' to be exactly identical (e.g. 
initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(_lowerCAmelCase ) > 0: logger.warning( f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' ''' use it for predictions and inference.''' ) else: logger.warning( f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' '''If your task is similar to the task the model of the checkpoint was trained on, ''' f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
288
'''simple docstring''' import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] ) -> Optional[int]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[int] ) -> Dict: UpperCAmelCase : Dict = tmp_path / '''cache''' UpperCAmelCase : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase : str = JsonDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_json_dataset(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def snake_case_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict , _lowerCAmelCase : int ) -> Optional[int]: UpperCAmelCase : Any = tmp_path / '''cache''' UpperCAmelCase : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase : Any = features.copy() 
if features else default_expected_features UpperCAmelCase : List[Any] = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase : Dict = JsonDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_json_dataset(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}, ] , ) def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple ) -> Tuple: UpperCAmelCase : Optional[Any] = tmp_path / '''cache''' UpperCAmelCase : Optional[int] = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''} UpperCAmelCase : int = features.copy() if features else default_expected_features UpperCAmelCase : Any = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase : Tuple = JsonDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ) -> Union[str, Any]: # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} UpperCAmelCase : Tuple = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''} UpperCAmelCase : List[str] = features.copy() UpperCAmelCase : Union[str, Any] = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase : Tuple = tmp_path / '''cache''' UpperCAmelCase : 
List[str] = JsonDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def snake_case_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ) -> Optional[Any]: UpperCAmelCase : Any = tmp_path / '''cache''' UpperCAmelCase : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase : List[Any] = JsonDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read() _check_json_dataset(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def snake_case_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Any ) -> Dict: if issubclass(_lowerCAmelCase , _lowerCAmelCase ): UpperCAmelCase : str = jsonl_path elif issubclass(_lowerCAmelCase , _lowerCAmelCase ): UpperCAmelCase : Dict = [jsonl_path] UpperCAmelCase : int = tmp_path / '''cache''' UpperCAmelCase : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase : Optional[int] = JsonDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_json_dataset(_lowerCAmelCase , _lowerCAmelCase ) def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict=("train",) ) -> Union[str, Any]: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) for split in splits: UpperCAmelCase : List[str] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert 
dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ) -> Any: UpperCAmelCase : Optional[Any] = tmp_path / '''cache''' UpperCAmelCase : List[str] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): UpperCAmelCase : Optional[int] = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_json_datasetdict(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] ) -> int: UpperCAmelCase : Dict = tmp_path / '''cache''' UpperCAmelCase : Dict = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase : Optional[int] = features.copy() if features else default_expected_features UpperCAmelCase : Union[str, Any] = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) UpperCAmelCase : Tuple = JsonDatasetReader({'''train''': jsonl_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_json_datasetdict(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), 
'''train''', '''test'''] ) def snake_case_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict ) -> Union[str, Any]: if split: UpperCAmelCase : Optional[int] = {split: jsonl_path} else: UpperCAmelCase : Any = '''train''' UpperCAmelCase : Any = {'''train''': jsonl_path, '''test''': jsonl_path} UpperCAmelCase : Tuple = tmp_path / '''cache''' UpperCAmelCase : int = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} UpperCAmelCase : Optional[Any] = JsonDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_json_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def snake_case_ ( _lowerCAmelCase : List[str] ) -> str: return json.load(_lowerCAmelCase ) def snake_case_ ( _lowerCAmelCase : Dict ) -> str: return [json.loads(_lowerCAmelCase ) for line in buffer] class SCREAMING_SNAKE_CASE: """simple docstring""" @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def A ( self : Union[str, Any] , __snake_case : Tuple , __snake_case : Optional[Any] , __snake_case : Optional[int] ) -> Dict: with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case ).write() buffer.seek(0 ) UpperCAmelCase : Union[str, Any] = load_json_function(__snake_case ) assert isinstance(__snake_case , __snake_case ) assert isinstance(exported_content[0] , __snake_case ) assert len(__snake_case ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), 
] , ) def A ( self : Optional[int] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : Optional[Any] ) -> List[Any]: with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , orient=__snake_case ).write() buffer.seek(0 ) UpperCAmelCase : Union[str, Any] = load_json(__snake_case ) assert isinstance(__snake_case , __snake_case ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__snake_case , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(__snake_case ) == 10 @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] ) def A ( self : str , __snake_case : str , __snake_case : str , __snake_case : int ) -> Any: with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , lines=__snake_case , num_proc=2 ).write() buffer.seek(0 ) UpperCAmelCase : Any = load_json_function(__snake_case ) assert isinstance(__snake_case , __snake_case ) assert isinstance(exported_content[0] , __snake_case ) assert len(__snake_case ) == 10 @pytest.mark.parametrize( '''orient, container, keys, len_at''' , [ ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None), ('''split''', dict, {'''columns''', '''data'''}, '''data'''), ('''index''', dict, set('''0123456789''' ), None), ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''), ('''values''', list, None, None), ('''table''', dict, {'''schema''', '''data'''}, '''data'''), ] , ) def A ( self : Any , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : List[str] ) -> Any: with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , 
lines=__snake_case , orient=__snake_case , num_proc=2 ).write() buffer.seek(0 ) UpperCAmelCase : List[str] = load_json(__snake_case ) assert isinstance(__snake_case , __snake_case ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(__snake_case , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(__snake_case ) == 10 def A ( self : List[Any] , __snake_case : str ) -> Dict: with pytest.raises(__snake_case ): with io.BytesIO() as buffer: JsonDatasetWriter(__snake_case , __snake_case , num_proc=0 ) @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] ) def A ( self : Optional[int] , __snake_case : Any , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Dict ) -> Union[str, Any]: UpperCAmelCase : List[str] = tmp_path_factory.mktemp('''data''' ) / F"""test.json.{extension}""" UpperCAmelCase : List[Any] = str(shared_datadir / F"""test_file.json.{extension}""" ) JsonDatasetWriter(__snake_case , __snake_case , compression=__snake_case ).write() with fsspec.open(__snake_case , '''rb''' , compression='''infer''' ) as f: UpperCAmelCase : str = f.read() with fsspec.open(__snake_case , '''rb''' , compression='''infer''' ) as f: UpperCAmelCase : Optional[int] = f.read() assert exported_content == original_content
23
0
"""simple docstring""" import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def a__ ( SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' lowerCAmelCase : List[Any] = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowerCAmelCase : Optional[int] = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: lowerCAmelCase : Any = 4 lowerCAmelCase : Tuple = 4_8 lowerCAmelCase : Any = """pixelshuffle_aux""" elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowerCAmelCase : Any = [6, 6, 6, 6] lowerCAmelCase : Tuple = 6_0 lowerCAmelCase : int = [6, 6, 6, 6] lowerCAmelCase : List[Any] = """pixelshuffledirect""" elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowerCAmelCase : Optional[Any] = 4 lowerCAmelCase : Optional[Any] = """nearest+conv""" elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: lowerCAmelCase : Tuple = 1 lowerCAmelCase : Optional[int] = 1 lowerCAmelCase : List[Any] = 1_2_6 lowerCAmelCase : Optional[Any] = 7 lowerCAmelCase : List[Any] = 255.0 lowerCAmelCase : Any = """""" return config def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: lowerCAmelCase : Tuple = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: lowerCAmelCase : List[str] = name.replace("patch_embed.norm" , "embeddings.patch_embeddings.layernorm" ) if "layers" in name: lowerCAmelCase : Tuple = name.replace("layers" , "encoder.stages" ) if "residual_group.blocks" in name: lowerCAmelCase : Tuple = name.replace("residual_group.blocks" , "layers" ) if "attn.proj" in name: lowerCAmelCase : List[Any] = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name: lowerCAmelCase : List[str] = name.replace("attn" , 
"attention.self" ) if "norm1" in name: lowerCAmelCase : Dict = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: lowerCAmelCase : Union[str, Any] = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: lowerCAmelCase : List[Any] = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: lowerCAmelCase : List[str] = name.replace("mlp.fc2" , "output.dense" ) if "q_bias" in name: lowerCAmelCase : List[str] = name.replace("q_bias" , "query.bias" ) if "k_bias" in name: lowerCAmelCase : Tuple = name.replace("k_bias" , "key.bias" ) if "v_bias" in name: lowerCAmelCase : Tuple = name.replace("v_bias" , "value.bias" ) if "cpb_mlp" in name: lowerCAmelCase : Optional[Any] = name.replace("cpb_mlp" , "continuous_position_bias_mlp" ) if "patch_embed.proj" in name: lowerCAmelCase : Any = name.replace("patch_embed.proj" , "patch_embed.projection" ) if name == "norm.weight": lowerCAmelCase : Optional[Any] = """layernorm.weight""" if name == "norm.bias": lowerCAmelCase : str = """layernorm.bias""" if "conv_first" in name: lowerCAmelCase : str = name.replace("conv_first" , "first_convolution" ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: lowerCAmelCase : Optional[int] = name.replace("conv_last" , "final_convolution" ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: lowerCAmelCase : int = name.replace("conv_before_upsample.0" , "conv_before_upsample" ) if "upsample.0" in name: lowerCAmelCase : Dict = name.replace("upsample.0" , "upsample.convolution_0" ) if "upsample.2" in name: lowerCAmelCase : Any = name.replace("upsample.2" , "upsample.convolution_1" ) lowerCAmelCase : Optional[Any] = """upsample.""" + name elif config.upsampler == "pixelshuffledirect": lowerCAmelCase : Optional[Any] = name.replace("upsample.0.weight" , 
"upsample.conv.weight" ) lowerCAmelCase : Any = name.replace("upsample.0.bias" , "upsample.conv.bias" ) else: pass else: lowerCAmelCase : Dict = """swin2sr.""" + name return name def a__ ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str ): '''simple docstring''' for key in orig_state_dict.copy().keys(): lowerCAmelCase : Tuple = orig_state_dict.pop(lowercase__ ) if "qkv" in key: lowerCAmelCase : Tuple = key.split("." ) lowerCAmelCase : str = int(key_split[1] ) lowerCAmelCase : Tuple = int(key_split[4] ) lowerCAmelCase : str = config.embed_dim if "weight" in key: lowerCAmelCase : Union[str, Any] = val[:dim, :] lowerCAmelCase : List[Any] = val[dim : dim * 2, :] lowerCAmelCase : int = val[-dim:, :] else: lowerCAmelCase : List[Any] = val[:dim] lowerCAmelCase : Optional[int] = val[dim : dim * 2] lowerCAmelCase : List[Any] = val[-dim:] pass else: lowerCAmelCase : Tuple = val return orig_state_dict def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' lowerCAmelCase : List[str] = get_config(lowercase__ ) lowerCAmelCase : Any = SwinaSRForImageSuperResolution(lowercase__ ) model.eval() lowerCAmelCase : List[str] = torch.hub.load_state_dict_from_url(lowercase__ , map_location="cpu" ) lowerCAmelCase : List[Any] = convert_state_dict(lowercase__ , lowercase__ ) lowerCAmelCase : str = model.load_state_dict(lowercase__ , strict=lowercase__ ) if len(lowercase__ ) > 0: raise ValueError("Missing keys when converting: {}".format(lowercase__ ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f"""Unexpected key {key} in state_dict""" ) # verify values lowerCAmelCase : Tuple = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true""" lowerCAmelCase : Optional[int] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert("RGB" ) 
lowerCAmelCase : Dict = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values lowerCAmelCase : Any = 1_2_6 if """Jpeg""" in checkpoint_url else 2_5_6 lowerCAmelCase : Optional[Any] = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) lowerCAmelCase : int = transforms(lowercase__ ).unsqueeze(0 ) if config.num_channels == 1: lowerCAmelCase : int = pixel_values[:, 0, :, :].unsqueeze(1 ) lowerCAmelCase : int = model(lowercase__ ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: lowerCAmelCase : Optional[int] = torch.Size([1, 3, 5_1_2, 5_1_2] ) lowerCAmelCase : Optional[Any] = torch.tensor( [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: lowerCAmelCase : Optional[Any] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) lowerCAmelCase : List[Any] = torch.tensor( [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here lowerCAmelCase : List[str] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) lowerCAmelCase : str = torch.tensor( [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: lowerCAmelCase : Tuple = torch.Size([1, 3, 5_1_2, 5_1_2] ) lowerCAmelCase : List[Any] = torch.tensor( [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: lowerCAmelCase : List[str] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] ) lowerCAmelCase : Union[str, Any] = torch.tensor( [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] ) assert ( outputs.reconstruction.shape == expected_shape ), f"""Shape of 
reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}""" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase__ , atol=1E-3 ) print("Looks ok!" ) lowerCAmelCase : Tuple = { """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": ( """swin2SR-classical-sr-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": ( """swin2SR-classical-sr-x4-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": ( """swin2SR-compressed-sr-x4-48""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": ( """swin2SR-lightweight-x2-64""" ), """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": ( """swin2SR-realworld-sr-x4-64-bsrgan-psnr""" ), } lowerCAmelCase : Dict = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowercase__ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(lowercase__ ) if push_to_hub: model.push_to_hub(f"""caidas/{model_name}""" ) processor.push_to_hub(f"""caidas/{model_name}""" ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''', type=str, help='''URL of the original Swin2SR checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''') lowerCAmelCase__ = parser.parse_args() 
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
356
"""simple docstring""" from ...configuration_utils import PretrainedConfig lowerCAmelCase__ = { '''google/tapas-base-finetuned-sqa''': ( '''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wtq''': ( '''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json''' ), '''google/tapas-base-finetuned-wikisql-supervised''': ( '''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json''' ), '''google/tapas-base-finetuned-tabfact''': ( '''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json''' ), } class SCREAMING_SNAKE_CASE__ ( lowercase ): """simple docstring""" a : Any ="tapas" def __init__( self , snake_case__=30_522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3_072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1_024 , snake_case__=[3, 256, 256, 2, 256, 256, 10] , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__=10.0 , snake_case__=0 , snake_case__=1.0 , snake_case__=None , snake_case__=1.0 , snake_case__=False , snake_case__=None , snake_case__=1.0 , snake_case__=1.0 , snake_case__=False , snake_case__=False , snake_case__="ratio" , snake_case__=None , snake_case__=None , snake_case__=64 , snake_case__=32 , snake_case__=False , snake_case__=True , snake_case__=False , snake_case__=False , snake_case__=True , snake_case__=False , snake_case__=None , snake_case__=None , **snake_case__ , ): """simple docstring""" super().__init__(pad_token_id=snake_case__ , **snake_case__ ) # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes) lowerCAmelCase : List[Any] = vocab_size lowerCAmelCase : List[str] = hidden_size lowerCAmelCase : Optional[Any] = num_hidden_layers lowerCAmelCase : List[str] = num_attention_heads lowerCAmelCase : Optional[Any] = hidden_act lowerCAmelCase : List[Any] = intermediate_size lowerCAmelCase : 
Optional[Any] = hidden_dropout_prob lowerCAmelCase : str = attention_probs_dropout_prob lowerCAmelCase : Any = max_position_embeddings lowerCAmelCase : Dict = type_vocab_sizes lowerCAmelCase : Union[str, Any] = initializer_range lowerCAmelCase : str = layer_norm_eps # Fine-tuning task hyperparameters lowerCAmelCase : Dict = positive_label_weight lowerCAmelCase : Union[str, Any] = num_aggregation_labels lowerCAmelCase : Optional[Any] = aggregation_loss_weight lowerCAmelCase : List[Any] = use_answer_as_supervision lowerCAmelCase : Dict = answer_loss_importance lowerCAmelCase : List[Any] = use_normalized_answer_loss lowerCAmelCase : List[str] = huber_loss_delta lowerCAmelCase : Optional[int] = temperature lowerCAmelCase : Optional[int] = aggregation_temperature lowerCAmelCase : Any = use_gumbel_for_cells lowerCAmelCase : Union[str, Any] = use_gumbel_for_aggregation lowerCAmelCase : Union[str, Any] = average_approximation_function lowerCAmelCase : int = cell_selection_preference lowerCAmelCase : Dict = answer_loss_cutoff lowerCAmelCase : Optional[int] = max_num_rows lowerCAmelCase : Union[str, Any] = max_num_columns lowerCAmelCase : Any = average_logits_per_cell lowerCAmelCase : List[Any] = select_one_column lowerCAmelCase : Tuple = allow_empty_column_selection lowerCAmelCase : str = init_cell_selection_weights_to_zero lowerCAmelCase : List[Any] = reset_position_index_per_cell lowerCAmelCase : Optional[Any] = disable_per_token_loss # Aggregation hyperparameters lowerCAmelCase : List[str] = aggregation_labels lowerCAmelCase : List[str] = no_aggregation_label_index if isinstance(self.aggregation_labels , snake_case__ ): lowerCAmelCase : Union[str, Any] = {int(snake_case__ ): v for k, v in aggregation_labels.items()}
133
0
"""Official evaluation script for SQuAD version 2.0.

Computes exact-match and F1 scores, optionally applying a no-answer
probability threshold and producing precision-recall curves.
"""
import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np

# English articles are stripped during answer normalization.
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
# Populated from the command line under `__main__`.
OPTS = None


def parse_args():
    """Parse the command-line options for the evaluation run."""
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    """Map each question id to whether it has at least one gold answer."""
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    """Tokenize a normalized answer; empty string yields no tokens."""
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    """Return 1 if the normalized answers match exactly, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_fa(a_gold, a_pred):
    """Return the token-level F1 score between gold and predicted answers."""
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def get_raw_scores(dataset, preds):
    """Compute per-question exact and F1 scores against all gold answers."""
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Zero out scores for questions the model predicts as unanswerable."""
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            # Predicted no-answer: credit only if the question truly has none.
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into exact/f1/total percentages."""
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    """Copy `new_eval` entries into `main_eval` under `prefix_*` keys."""
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    """Plot and save one precision-recall curve (requires matplotlib)."""
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Compute average precision, sweeping the no-answer threshold."""
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Produce PR curves for exact, F1 and the oracle task; merge the APs."""
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_fa = make_precision_recall_eval(
        fa_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for a question subset."""
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer threshold that maximizes the given metric."""
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record best achievable exact/F1 scores and their thresholds."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh


def main():
    """Run the full evaluation according to the parsed `OPTS`."""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
324
"""Audio Spectrogram Transformer (AST) model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map of pretrained AST checkpoints to their hosted config files.
lowercase__ = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class __lowerCAmelCase(PretrainedConfig):
    """Configuration for Audio Spectrogram Transformer models.

    Holds the ViT-style encoder hyperparameters plus the spectrogram
    patching parameters (frequency/time strides, max length, mel bins).
    Extra keyword arguments are forwarded to ``PretrainedConfig``.
    """

    # `PretrainedConfig` subclasses identify themselves via `model_type`.
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        # Spectrogram patching: strides along the frequency and time axes.
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
324
1
import os
import tempfile
import unittest

from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        FlaubertForMultipleChoice,
        FlaubertForQuestionAnswering,
        FlaubertForQuestionAnsweringSimple,
        FlaubertForSequenceClassification,
        FlaubertForTokenClassification,
        FlaubertModel,
        FlaubertWithLMHeadModel,
    )
    from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST


class FlaubertModelTester(object):
    """Builds a tiny FlauBERT configuration and inputs, and shape-checks
    every model head against them."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random input tensors (ids, masks, lengths, labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        """Build the small FlaubertConfig used by every check."""
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        ((total_loss,),) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        ((total_loss,),) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Repackage the fixture tensors as the common `inputs_dict`."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standard model/pipeline test-suite wiring for FlauBERT."""

    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    """Checks a real checkpoint's forward pass against reference values."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
370
from math import factorial


def combinations(n, k):
    """Return the binomial coefficient C(n, k) = n! / (k! * (n-k)!).

    Raises:
        ValueError: if k > n or k < 0 (a factorial of a negative number
            would otherwise be required).
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
325
0
import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): __UpperCamelCase =AlbertConfig.from_json_file(__a ) print(F'Building PyTorch model from configuration: {config}' ) __UpperCamelCase =AlbertForPreTraining(__a ) # Load weights from tf checkpoint load_tf_weights_in_albert(__a , __a , __a ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , __a ) if __name__ == "__main__": _A = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--albert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained ALBERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) _A = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
62
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging A : Optional[Any] = logging.get_logger(__name__) A : str = '''▁''' A : Any = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', '''tokenizer_config_file''': '''tokenizer_config.json''', } A : List[Any] = { '''vocab_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''', }, '''spm_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_config_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''', }, } A : Tuple = { '''facebook/m2m100_418M''': 1_0_2_4, } # fmt: off A : Optional[int] = { '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', 
'''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''], '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de'''] } class A (SCREAMING_SNAKE_CASE ): '''simple docstring''' __lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES __lowerCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase : Dict = ['''input_ids''', '''attention_mask'''] __lowerCamelCase : List[int] = [] __lowerCamelCase : List[int] = [] def __init__( self : List[Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Union[str, Any]=None , __lowerCAmelCase : str=None , __lowerCAmelCase : List[Any]="<s>" , __lowerCAmelCase : List[Any]="</s>" , __lowerCAmelCase : Optional[int]="</s>" , __lowerCAmelCase : Optional[Any]="<pad>" , __lowerCAmelCase : Any="<unk>" , __lowerCAmelCase : Any="m2m100" , __lowerCAmelCase : Optional[Dict[str, Any]] = None , __lowerCAmelCase : Dict=8 , **__lowerCAmelCase : Tuple , ) -> None: """simple docstring""" A__ = {} if sp_model_kwargs is None else sp_model_kwargs A__ = language_codes A__ = FAIRSEQ_LANGUAGE_CODES[language_codes] A__ = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code} A__ = kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(__lowerCAmelCase ) for lang_code in fairseq_language_code if self.get_lang_token(__lowerCAmelCase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , 
unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , language_codes=__lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__lowerCAmelCase , **__lowerCAmelCase , ) A__ = vocab_file A__ = load_json(__lowerCAmelCase ) A__ = {v: k for k, v in self.encoder.items()} A__ = spm_file A__ = load_spm(__lowerCAmelCase , self.sp_model_kwargs ) A__ = len(self.encoder ) A__ = { self.get_lang_token(__lowerCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase ) } A__ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__lowerCAmelCase )} A__ = {v: k for k, v in self.lang_token_to_id.items()} A__ = src_lang if src_lang is not None else """en""" A__ = tgt_lang A__ = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) A__ = num_madeup_words @property def a_ ( self : Optional[int] ) -> int: """simple docstring""" return len(self.encoder ) + len(self.lang_token_to_id ) @property def a_ ( self : Optional[Any] ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def a_ ( self : List[Any] , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def a_ ( self : Optional[int] , __lowerCAmelCase : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(__lowerCAmelCase , out_type=__lowerCAmelCase ) def a_ ( self : Optional[Any] , __lowerCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(__lowerCAmelCase , self.encoder[self.unk_token] ) def a_ ( self : Optional[int] , __lowerCAmelCase : int ) -> str: """simple docstring""" if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(__lowerCAmelCase , self.unk_token ) def a_ ( self : Optional[int] , __lowerCAmelCase : Dict ) -> str: """simple docstring""" A__ = [] A__ = """""" for token in tokens: # make 
sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCAmelCase ) + token A__ = [] else: current_sub_tokens.append(__lowerCAmelCase ) out_string += self.sp_model.decode(__lowerCAmelCase ) return out_string.strip() def a_ ( self : List[str] , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None , __lowerCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) A__ = [1] * len(self.prefix_tokens ) A__ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__lowerCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(__lowerCAmelCase )) + ([0] * len(__lowerCAmelCase )) + suffix_ones def a_ ( self : Tuple , __lowerCAmelCase : List[int] , __lowerCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def a_ ( self : int ) -> Dict: """simple docstring""" A__ = {self.convert_ids_to_tokens(__lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ) -> Dict: """simple docstring""" A__ = self.__dict__.copy() A__ = None return state def __setstate__( self : str , __lowerCAmelCase : Dict ) -> None: """simple docstring""" A__ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): A__ = {} A__ = load_spm(self.spm_file , self.sp_model_kwargs ) def a_ ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" 
A__ = Path(__lowerCAmelCase ) if not save_dir.is_dir(): raise OSError(f'{save_directory} should be a directory' ) A__ = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) A__ = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , __lowerCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(__lowerCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , __lowerCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(__lowerCAmelCase , """wb""" ) as fi: A__ = self.sp_model.serialized_model_proto() fi.write(__lowerCAmelCase ) return (str(__lowerCAmelCase ), str(__lowerCAmelCase )) def a_ ( self : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : str = "en" , __lowerCAmelCase : Optional[List[str]] = None , __lowerCAmelCase : str = "ro" , **__lowerCAmelCase : List[Any] , ) -> BatchEncoding: """simple docstring""" A__ = src_lang A__ = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ) def a_ ( self : Optional[int] , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[str] , __lowerCAmelCase : Optional[str] , **__lowerCAmelCase : Tuple ) -> Tuple: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) A__ = src_lang A__ = self(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase ) A__ = self.get_lang_id(__lowerCAmelCase ) A__ = tgt_lang_id return inputs def a_ ( self : Dict ) -> int: """simple docstring""" self.set_src_lang_special_tokens(self.src_lang ) def a_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" self.set_tgt_lang_special_tokens(self.tgt_lang ) def a_ ( self : str , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ 
= self.get_lang_token(__lowerCAmelCase ) A__ = self.lang_token_to_id[lang_token] A__ = [self.cur_lang_id] A__ = [self.eos_token_id] def a_ ( self : Tuple , __lowerCAmelCase : str ) -> None: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) A__ = self.lang_token_to_id[lang_token] A__ = [self.cur_lang_id] A__ = [self.eos_token_id] def a_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> str: """simple docstring""" return self.lang_code_to_token[lang] def a_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> int: """simple docstring""" A__ = self.get_lang_token(__lowerCAmelCase ) return self.lang_token_to_id[lang_token] def __lowerCamelCase ( __a :str , __a :Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: """simple docstring""" A__ = sentencepiece.SentencePieceProcessor(**__a ) spm.Load(str(__a ) ) return spm def __lowerCamelCase ( __a :str ) -> Union[Dict, List]: """simple docstring""" with open(__a , """r""" ) as f: return json.load(__a ) def __lowerCamelCase ( __a :List[Any] , __a :str ) -> None: """simple docstring""" with open(__a , """w""" ) as f: json.dump(__a , __a , indent=2 )
274
0
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'} _UpperCAmelCase = { 'vocab_file': { 'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt', }, 'emoji_file': { 'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json', }, } _UpperCAmelCase = { 'abeja/gpt-neox-japanese-2.7b': 2048, } def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :Optional[int] ) -> Optional[Any]: with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f: __lowerCAmelCase : int = json.loads(f.read() ) __lowerCAmelCase : Dict = collections.OrderedDict() __lowerCAmelCase : str = collections.OrderedDict() __lowerCAmelCase : Union[str, Any] = collections.OrderedDict() with open(SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" ) as f: __lowerCAmelCase : Tuple = f.readlines() __lowerCAmelCase : Tuple = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token] for idx, b in enumerate(SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = b __lowerCAmelCase : Dict = idx for wd in b: __lowerCAmelCase : List[str] = idx return vocab, raw_vocab, ids_to_tokens, emoji class snake_case_ ( __lowercase ): A_ = VOCAB_FILES_NAMES A_ = PRETRAINED_VOCAB_FILES_MAP A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ = ['input_ids', 'attention_mask'] def __init__( self : str , _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Any="<|endoftext|>" , _snake_case : str="<|endoftext|>" , _snake_case : str="<|startoftext|>" , _snake_case : 
List[Any]="<|endoftext|>" , _snake_case : str=False , **_snake_case : List[Any] , )->Union[str, Any]: '''simple docstring''' super().__init__( unk_token=_snake_case , pad_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , do_clean_text=_snake_case , **_snake_case , ) if not os.path.isfile(_snake_case ): raise ValueError( F'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained''' """ model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" ) if not os.path.isfile(_snake_case ): raise ValueError( F'''Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google''' """ pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" ) __lowerCAmelCase : Any = do_clean_text __lowerCAmelCase : Union[str, Any] = load_vocab_and_emoji(_snake_case , _snake_case ) __lowerCAmelCase : int = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def UpperCAmelCase__ ( self : int )->str: '''simple docstring''' return len(self.raw_vocab ) def UpperCAmelCase__ ( self : Tuple )->Any: '''simple docstring''' return dict(self.raw_vocab , **self.added_tokens_encoder ) def UpperCAmelCase__ ( self : Any , _snake_case : str )->Optional[int]: '''simple docstring''' return self.subword_tokenizer.tokenize(_snake_case , clean=self.do_clean_text ) def UpperCAmelCase__ ( self : Optional[Any] , _snake_case : Optional[Any] )->Any: '''simple docstring''' return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) ) def UpperCAmelCase__ ( self : int , _snake_case : Any )->int: '''simple docstring''' return self.subword_tokenizer.convert_id_to_token(_snake_case ) def UpperCAmelCase__ ( self : Optional[int] , _snake_case : int )->List[Any]: '''simple docstring''' __lowerCAmelCase : str = """""".join(_snake_case ).strip() return out_string def UpperCAmelCase__ ( self : 
List[str] , _snake_case : "Conversation" )->List[int]: '''simple docstring''' __lowerCAmelCase : Union[str, Any] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_snake_case , add_special_tokens=_snake_case ) + [self.eos_token_id] ) if len(_snake_case ) > self.model_max_length: __lowerCAmelCase : List[str] = input_ids[-self.model_max_length :] return input_ids def UpperCAmelCase__ ( self : Optional[Any] , _snake_case : str , _snake_case : Optional[str] = None )->Tuple[str]: '''simple docstring''' __lowerCAmelCase : Optional[Any] = 0 if os.path.isdir(_snake_case ): __lowerCAmelCase : Dict = os.path.join( _snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __lowerCAmelCase : List[Any] = os.path.join( _snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] ) else: __lowerCAmelCase : Union[str, Any] = ( (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""] ) __lowerCAmelCase : Dict = ( (filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""] ) with open(_snake_case , """w""" , encoding="""utf-8""" ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' """ Please check that the vocabulary is not corrupted!""" ) __lowerCAmelCase : List[str] = token_index writer.write(""",""".join(_snake_case ) + """\n""" ) index += 1 with open(_snake_case , """w""" , encoding="""utf-8""" ) as writer: json.dump(self.emoji , _snake_case ) return vocab_file, emoji_file class snake_case_ ( __lowercase ): def __init__( self : Optional[Any] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Optional[int] )->List[Any]: '''simple docstring''' __lowerCAmelCase : Optional[Any] = vocab # 
same as swe __lowerCAmelCase : str = ids_to_tokens # same as bpe __lowerCAmelCase : Dict = emoji __lowerCAmelCase : int = np.max([len(_snake_case ) for w in self.vocab.keys()] ) __lowerCAmelCase : str = re.compile(R"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" ) __lowerCAmelCase : Optional[Any] = re.compile(R"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" ) __lowerCAmelCase : Tuple = re.compile(R"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" ) __lowerCAmelCase : Optional[Any] = re.compile( R"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" ) __lowerCAmelCase : Union[str, Any] = re.compile( R"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" ) __lowerCAmelCase : str = re.compile( R"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" ) __lowerCAmelCase : List[Any] = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿""" __lowerCAmelCase : Union[str, Any] = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟""" __lowerCAmelCase : str = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} ) def __len__( self : int )->int: '''simple docstring''' return len(self.ids_to_tokens ) def UpperCAmelCase__ ( self : List[str] , _snake_case : Any )->str: '''simple docstring''' __lowerCAmelCase : List[str] = self.content_repattera.sub("""<URL>""" , _snake_case ) __lowerCAmelCase : Tuple = self.content_repattera.sub("""<EMAIL>""" , _snake_case ) __lowerCAmelCase : Optional[Any] = self.content_repattera.sub("""<TEL>""" , _snake_case ) __lowerCAmelCase : str = 
self.content_repattera.sub("""<DATE>""" , _snake_case ) __lowerCAmelCase : Tuple = self.content_repattera.sub("""<DATE>""" , _snake_case ) __lowerCAmelCase : Tuple = self.content_repattera.sub("""<PRICE>""" , _snake_case ) __lowerCAmelCase : List[Any] = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: __lowerCAmelCase : str = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" ) return content def UpperCAmelCase__ ( self : str , _snake_case : List[Any] , _snake_case : Optional[int]=False )->int: '''simple docstring''' __lowerCAmelCase : Optional[int] = text.replace(""" """ , """<SP>""" ) __lowerCAmelCase : Optional[int] = text.replace(""" """ , """<SP>""" ) __lowerCAmelCase : Union[str, Any] = text.replace("""\r\n""" , """<BR>""" ) __lowerCAmelCase : Tuple = text.replace("""\n""" , """<BR>""" ) __lowerCAmelCase : List[str] = text.replace("""\r""" , """<BR>""" ) __lowerCAmelCase : Dict = text.replace("""\t""" , """<TAB>""" ) __lowerCAmelCase : Dict = text.replace("""—""" , """ー""" ) __lowerCAmelCase : Tuple = text.replace("""−""" , """ー""" ) for k, v in self.emoji["emoji"].items(): if k in text: __lowerCAmelCase : Optional[Any] = text.replace(_snake_case , _snake_case ) if clean: __lowerCAmelCase : List[Any] = self.clean_text(_snake_case ) def check_simbol(_snake_case : List[str] ): __lowerCAmelCase : Optional[int] = x.encode() if len(_snake_case ) == 1 and len(_snake_case ) == 2: __lowerCAmelCase : Optional[Any] = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0xc2a1 and c <= 0xc2bf) or (c >= 0xc780 and c <= 0xc783) or (c >= 0xcab9 and c <= 0xcbbf) or (c >= 0xcc80 and c <= 0xcda2) ): return True return False def checkuae(_snake_case : Union[str, Any] ): __lowerCAmelCase : Dict = x.encode() if len(_snake_case ) == 1 and len(_snake_case ) == 3: __lowerCAmelCase : List[str] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0xe2_8080 and c <= 0xe2_b07f: return True return False __lowerCAmelCase : Dict = 0 __lowerCAmelCase : Dict = [] 
while pos < len(_snake_case ): __lowerCAmelCase : str = min(len(_snake_case ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3 __lowerCAmelCase : Tuple = [] # (token_id, token, pos) for e in range(_snake_case , _snake_case , -1 ): __lowerCAmelCase : Optional[int] = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(_snake_case ) > 2: __lowerCAmelCase : Tuple = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(_snake_case ) > 0: # the smallest token_id is adopted __lowerCAmelCase : int = sorted(_snake_case , key=lambda _snake_case : x[0] )[0] result.append(_snake_case ) __lowerCAmelCase : int = e else: __lowerCAmelCase : Dict = pos + 1 __lowerCAmelCase : Dict = text[pos:end] if check_simbol(_snake_case ): result.append("""<KIGOU>""" ) elif checkuae(_snake_case ): result.append("""<U2000U2BFF>""" ) else: for i in wd.encode("""utf-8""" ): result.append("""<|byte%d|>""" % i ) __lowerCAmelCase : int = end return result def UpperCAmelCase__ ( self : List[str] , _snake_case : Optional[int] , _snake_case : List[Any]="\n" )->List[Any]: '''simple docstring''' __lowerCAmelCase : List[str] = [] __lowerCAmelCase : Union[str, Any] = [] __lowerCAmelCase : Optional[Any] = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(_snake_case ) > 0: words.append(bytearray(_snake_case ).decode("""utf-8""" , errors="""replace""" ) ) __lowerCAmelCase : Optional[Any] = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji["""emoji_inv"""][word] ) elif word == "<SP>": words.append(""" """ ) elif word == "<BR>": words.append(_snake_case ) elif word == "<TAB>": words.append("""\t""" ) elif word == "<BLOCK>": words.append("""▀""" ) elif word == "<KIGOU>": words.append("""ǀ""" ) elif word == "<U2000U2BFF>": words.append("""‖""" ) else: words.append(_snake_case ) if len(_snake_case ) > 0: words.append(bytearray(_snake_case ).decode("""utf-8""" , 
errors="""replace""" ) ) __lowerCAmelCase : Dict = """""".join(_snake_case ) return text
362
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _UpperCAmelCase = { 'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase = [ 'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTBigCodeForSequenceClassification', 'GPTBigCodeForTokenClassification', 'GPTBigCodeForCausalLM', 'GPTBigCodeModel', 'GPTBigCodePreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys _UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
232
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _UpperCamelCase : List[str] = { 'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'], 'tokenization_lxmert': ['LxmertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Tuple = ['LxmertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Tuple = [ 'LxmertEncoder', 'LxmertForPreTraining', 'LxmertForQuestionAnswering', 'LxmertModel', 'LxmertPreTrainedModel', 'LxmertVisualFeatureEncoder', 'LxmertXLayer', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Optional[int] = [ 'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLxmertForPreTraining', 'TFLxmertMainLayer', 'TFLxmertModel', 'TFLxmertPreTrainedModel', 'TFLxmertVisualFeatureEncoder', ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, 
TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys _UpperCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
220
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( __snake_case : int ): '''simple docstring''' return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print('Program to check whether a number is a Perfect number or not...') _UpperCamelCase : Tuple = int(input('Enter number: ').strip()) print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
220
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _UpperCamelCase = { 'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'], 'feature_extraction_whisper': ['WhisperFeatureExtractor'], 'processing_whisper': ['WhisperProcessor'], 'tokenization_whisper': ['WhisperTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = ['WhisperTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ 'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'WhisperForConditionalGeneration', 'WhisperModel', 'WhisperPreTrainedModel', 'WhisperForAudioClassification', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ 'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWhisperForConditionalGeneration', 'TFWhisperModel', 'TFWhisperPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase = [ 'FlaxWhisperForConditionalGeneration', 'FlaxWhisperModel', 'FlaxWhisperPreTrainedModel', 'FlaxWhisperForAudioClassification', ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not 
is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys _UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
358
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> List[str]: '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : Any = batch_size __UpperCAmelCase : Union[str, Any] = seq_length __UpperCAmelCase : int = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[str] = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : Tuple = hidden_size __UpperCAmelCase : Union[str, Any] = num_hidden_layers __UpperCAmelCase : Optional[int] = num_attention_heads __UpperCAmelCase : str = intermediate_size __UpperCAmelCase : List[Any] = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout_prob __UpperCAmelCase : List[Any] = attention_probs_dropout_prob __UpperCAmelCase : Optional[Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : Dict = 
type_sequence_label_size __UpperCAmelCase : Optional[Any] = initializer_range __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : Optional[Any] = num_choices __UpperCAmelCase : int = scope def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[Any] = None if self.use_input_mask: __UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Any = None if self.use_token_type_ids: __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : Optional[int] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> List[str]: '''simple docstring''' return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.get_config() __UpperCAmelCase : List[Any] = 300 return config def __A ( self ) -> Dict: '''simple docstring''' ( ( 
__UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = self.prepare_config_and_inputs() __UpperCAmelCase : Tuple = True __UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = MraModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __UpperCAmelCase : Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) __UpperCAmelCase : List[str] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> str: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[Any] = MraModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) 
__UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Any = MraForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int: '''simple docstring''' __UpperCAmelCase : str = MraForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Optional[Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str: '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : int = MraForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , 
(self.batch_size, self.num_labels) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = self.num_labels __UpperCAmelCase : str = MraForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : int = MraForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : List[str] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : List[Any] = config_and_inputs __UpperCAmelCase : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return 
config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Any = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : int = False _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Dict = () def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = MraModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __A ( self ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> int: '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : List[Any] = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> str: '''simple docstring''' __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase ) @slow def __A ( self ) -> Any: '''simple docstring''' for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Tuple = MraModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason="""MRA does not output attentions""" ) def __A ( self ) -> List[Any]: '''simple docstring''' return @require_torch class _A ( unittest.TestCase ): @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Tuple = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) __UpperCAmelCase : str = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : List[Any] = model(__UpperCAmelCase )[0] __UpperCAmelCase : Optional[Any] = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : int = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) __UpperCAmelCase : Union[str, Any] = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : int = model(__UpperCAmelCase )[0] __UpperCAmelCase : Union[str, Any] = 50_265 __UpperCAmelCase : Union[str, Any] = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : int = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) ) 
@slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) __UpperCAmelCase : Dict = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : Any = model(__UpperCAmelCase )[0] __UpperCAmelCase : Dict = 50_265 __UpperCAmelCase : Optional[int] = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , __UpperCAmelCase ) __UpperCAmelCase : str = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
16
0
"""Solve systems of linear equations by Gaussian elimination."""


def simplify(current_set: list[list]) -> list[list]:
    """
    Perform one elimination step of a Gaussian-elimination pass.

    Each row is divided by its leading coefficient (rows whose leading
    coefficient is 0 are left untouched), then the first row is subtracted
    from every other row to cancel the leading term.  The function recurses
    on the remaining sub-matrix until the rows are of length 3.

    >>> simplify([[1, 2, 3], [4, 5, 6]])
    [[1.0, 2.0, 3.0], [0.0, 0.75, 1.5]]
    """
    # Divide each row by the magnitude of its first term --> 'unit' rows.
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract the first row from every other row to cancel the first term.
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # A leading 0 means the term is already cancelled; preserve the row.
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Recurse on the sub-matrix (drop the first column of every later row).
    if len(final_set[0]) != 3:
        first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """
    Solve a system of n linear equations given as n rows of n+1 numbers
    (coefficients followed by the constant term), via Gaussian elimination
    with back-substitution.

    >>> solve_simultaneous([[1, 2, 3], [4, 5, 6]])
    [-1.0, 2.0]
    >>> solve_simultaneous([[0, -3, 1, 7], [3, 2, -1, 11], [5, 1, -2, 12]])
    [6.4, 1.2, 10.6]
    >>> solve_simultaneous([[4, 2]])
    [0.5]

    :raises IndexError: if the rows do not form an n x (n+1) matrix
    :raises ValueError: if a row holds a non-numeric entry, or no row free
        of zeros is available to pivot on
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        # Floats are accepted too; the original message claimed "integers".
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of numbers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    # simplify() divides by leading terms, so move a zero-free row to the top.
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            # Bottom row holds a single unknown: solve it directly.
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        # Back-substitute the unknowns already solved for.
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
53
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(
    img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    """
    Warp ``img`` with the affine transform mapping the three source points
    ``pts1`` onto the three destination points ``pts2``.

    :param img: input image array (grayscale or colour)
    :param pts1: 3x2 float32 array of source points
    :param pts2: 3x2 float32 array of destination points
    :param rows: height of the output image
    :param cols: width of the output image
    :return: the warped image as a numpy array
    """
    matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image into gray scale values
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different point triplets used to rotate the image
    # NOTE(review): the obfuscated source collapsed all four arrays to one
    # name; the (pts1,pts2)/(pts2,pts3)/(pts2,pts4) pairing below follows the
    # upstream TheAlgorithms implementation -- confirm against the original.
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot the different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
138
0
def convert_to_negative(img):
    """
    Invert an image in place: every channel value ``v`` becomes ``255 - v``.

    :param img: numpy image array (uint8); modified in place
    :return: the same array, inverted
    """
    # Vectorised inversion replaces the original O(rows*cols) Python pixel
    # loop; it also works for grayscale (2-D) arrays, not only 3-channel.
    img[:] = 255 - img
    return img


if __name__ == "__main__":
    # OpenCV is only needed when run as a script; import it lazily so the
    # module stays importable (and testable) without cv2 installed.
    from cv2 import destroyAllWindows, imread, imshow, waitKey

    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
330
import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy UpperCamelCase__ : Any = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = { """artists_file""": """artists.json""", """lyrics_file""": """lyrics.json""", """genres_file""": """genres.json""", } UpperCamelCase__ : Union[str, Any] = { """artists_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""", }, """genres_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""", }, """lyrics_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""", }, } UpperCamelCase__ : str = { """jukebox""": 512, } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_LYRIC_TOKENS_SIZES SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask'] def __init__( self : Optional[Any] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Union[str, Any]=["v3", "v2", "v2"] ,__lowerCamelCase : List[Any]=5_12 ,__lowerCamelCase : Tuple=5 ,__lowerCamelCase : List[Any]="<|endoftext|>" ,**__lowerCamelCase : List[str] ,): '''simple docstring''' a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else unk_token super().__init__( unk_token=__lowerCamelCase ,n_genres=__lowerCamelCase ,version=__lowerCamelCase ,max_n_lyric_tokens=__lowerCamelCase ,**__lowerCamelCase ,) a = version a = max_n_lyric_tokens a = n_genres 
with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) a = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. if len(self.lyrics_encoder ) == 79: a = oov.replace(r'''\-\'''' ,r'''\-+\'''' ) a = regex.compile(__lowerCamelCase ) a = {v: k for k, v in self.artists_encoder.items()} a = {v: k for k, v in self.genres_encoder.items()} a = {v: k for k, v in self.lyrics_encoder.items()} @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ): '''simple docstring''' a = [self.artists_encoder.get(__lowerCamelCase ,0 ) for artist in list_artists] for genres in range(len(__lowerCamelCase ) ): a = [self.genres_encoder.get(__lowerCamelCase ,0 ) for genre in list_genres[genres]] a = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) a = [[self.lyrics_encoder.get(__lowerCamelCase ,0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : List[str] ): '''simple docstring''' return list(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Any ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' a , a , a = 
self.prepare_for_tokenization(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) a = self._tokenize(__lowerCamelCase ) return artist, genre, lyrics def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : str ,__lowerCamelCase : str ,__lowerCamelCase : str ,__lowerCamelCase : bool = False ): '''simple docstring''' for idx in range(len(self.version ) ): if self.version[idx] == "v3": a = artists[idx].lower() a = [genres[idx].lower()] else: a = self._normalize(artists[idx] ) + '''.v2''' a = [ self._normalize(__lowerCamelCase ) + '''.v2''' for genre in genres[idx].split('''_''' ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": a = regex.compile(r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' ) a = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n''' a = {vocab[index]: index + 1 for index in range(len(__lowerCamelCase ) )} a = 0 a = len(__lowerCamelCase ) + 1 a = self.vocab a = {v: k for k, v in self.vocab.items()} a = '''''' else: a = regex.compile(r'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' ) a = self._run_strip_accents(__lowerCamelCase ) a = lyrics.replace('''\\''' ,'''\n''' ) a = self.out_of_vocab.sub('''''' ,__lowerCamelCase ), [], [] return artists, genres, lyrics def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : int ): '''simple docstring''' a = unicodedata.normalize('''NFD''' ,__lowerCamelCase ) a = [] for char in text: a = unicodedata.category(__lowerCamelCase ) if cat == "Mn": continue output.append(__lowerCamelCase ) return "".join(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : str ): '''simple docstring''' a = ( [chr(__lowerCamelCase ) for i in range(ord('''a''' ) ,ord('''z''' ) + 1 )] + [chr(__lowerCamelCase ) for i in range(ord('''A''' ) ,ord('''Z''' ) + 1 )] + [chr(__lowerCamelCase ) for i in range(ord('''0''' ) ,ord('''9''' ) + 1 )] + ['''.'''] ) a = frozenset(__lowerCamelCase ) a = re.compile(r'''_+''' ) a = ''''''.join([c if 
c in accepted else '''_''' for c in text.lower()] ) a = pattern.sub('''_''' ,__lowerCamelCase ).strip('''_''' ) return text def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : List[str] ): '''simple docstring''' return " ".join(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : str ,__lowerCamelCase : Optional[Union[str, TensorType]] = None ,__lowerCamelCase : bool = False ): '''simple docstring''' if not isinstance(__lowerCamelCase ,__lowerCamelCase ): a = TensorType(__lowerCamelCase ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' ) import tensorflow as tf a = tf.constant a = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' ) import torch a = torch.tensor a = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' ) import jax.numpy as jnp # noqa: F811 a = jnp.array a = _is_jax else: a = np.asarray a = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: a = [inputs] if not is_tensor(__lowerCamelCase ): a = as_tensor(__lowerCamelCase ) except: # noqa E722 raise ValueError( '''Unable to create tensor, you should probably activate truncation and/or padding ''' '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' ) return inputs def __call__( self : Tuple ,__lowerCamelCase : Tuple ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : List[str]="" ,__lowerCamelCase : List[Any]="pt" ): '''simple docstring''' a = [0, 0, 0] a = [artist] * len(self.version ) a = [genres] * len(self.version ) a , a , a = self.tokenize(__lowerCamelCase 
,__lowerCamelCase ,__lowerCamelCase ) a , a , a = self._convert_token_to_id(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) a = [-INFINITY] * len(full_tokens[-1] ) a = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=__lowerCamelCase ) for i in range(len(self.version ) ) ] return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} ) def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : str ,__lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.artists_encoder ,ensure_ascii=__lowerCamelCase ) ) a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.genres_encoder ,ensure_ascii=__lowerCamelCase ) ) a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=__lowerCamelCase ) ) return (artists_file, genres_file, lyrics_file) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Any ,__lowerCamelCase : Any ,__lowerCamelCase : str ): '''simple docstring''' a = self.artists_decoder.get(__lowerCamelCase ) a = [self.genres_decoder.get(__lowerCamelCase ) for genre in genres_index] a = [self.lyrics_decoder.get(__lowerCamelCase ) for character in lyric_index] return artist, genres, lyrics
330
1
"""simple docstring""" from __future__ import annotations import typing from collections.abc import Iterable import numpy as np lowercase__ : Tuple = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 lowercase__ : List[Any] = typing.Union[np.floataa, int, float] # noqa: UP007 def UpperCamelCase_ ( lowerCAmelCase__ : Vector , lowerCAmelCase__ : Vector ) -> VectorOut: """simple docstring""" return np.sqrt(np.sum((np.asarray(lowerCAmelCase__ ) - np.asarray(lowerCAmelCase__ )) ** 2 ) ) def UpperCamelCase_ ( lowerCAmelCase__ : Vector , lowerCAmelCase__ : Vector ) -> VectorOut: """simple docstring""" return sum((va - va) ** 2 for va, va in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ) ** (1 / 2) if __name__ == "__main__": def UpperCamelCase_ ( ) -> None: """simple docstring""" from timeit import timeit print('Without Numpy' ) print( timeit( 'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=1_0000 , globals=globals() , ) ) print('With Numpy' ) print( timeit( 'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=1_0000 , globals=globals() , ) ) benchmark()
224
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=lowercase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = field(default="""summarization""", metadata={"""include_in_asdict_even_if_is_default""": True} ) _SCREAMING_SNAKE_CASE = Features({"""text""": Value("""string""" )} ) _SCREAMING_SNAKE_CASE = Features({"""summary""": Value("""string""" )} ) _SCREAMING_SNAKE_CASE = "text" _SCREAMING_SNAKE_CASE = "summary" @property def SCREAMING_SNAKE_CASE__ ( self : Dict ): return {self.text_column: "text", self.summary_column: "summary"}
224
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: submodule name -> public names it provides.
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply don't expose the modeling symbols.
    pass
else:
    # Register the torch-backed symbols with the lazy structure (the
    # obfuscated source assigned this list to a throwaway name, so the
    # modeling module was never reachable through the lazy module).
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand
    # (the obfuscated source dropped the sys.modules assignment entirely).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
369
"""Tests for the EfficientFormer image processor (which reuses ViTImageProcessor).

Fixes: the tester class was unnamed/shadowed (`A__` twice) while `setUp`
instantiated an undefined `EfficientFormerImageProcessorTester`; the mixin
base was the undefined `__snake_case`; every method was named `__UpperCamelCase`
(so later defs shadowed earlier ones and no test ever ran); and the tester's
`__init__`/`setUp` assigned to locals instead of attributes.
"""

import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    """Holds sizing parameters and builds the kwargs dict for the image processor under test."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class A__(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Exercises the processor with PIL, numpy and torch inputs, batched and unbatched."""

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        # Covered by the mixin / not applicable here.
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
140
0
"""Speech2Text-style feature extractor: log-mel fbank features + utterance-level CMVN.

Fixes: every signature reused one parameter name (a SyntaxError); the base
class was the undefined `__UpperCAmelCase`; the three methods were all named
`_lowerCamelCase` while call sites use `_extract_fbank_features`,
`utterance_cmvn` and `normalize`; the padded-tail fill in `utterance_cmvn`
had been reduced to a no-op assignment; `np.floataa`/`np.intaa` are not
real numpy dtypes (restored to float32/float64/int32).
"""

from typing import List, Optional, Union

import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


__snake_case: str = logging.get_logger(__name__)


class _A(SequenceFeatureExtractor):
    """Extracts Kaldi-compliant 80-bin log-mel filter-bank features from raw speech."""

    # Names of the tensors this extractor produces (consumed by the base class).
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        # Attention mask is needed downstream to mask out padded frames in CMVN.
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute log-mel fbank features for one mono waveform via torchaudio's Kaldi port."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ):
        """Utterance-level cepstral mean/variance normalization over the first `input_length` frames."""
        # Statistics are computed on the non-padded prefix only.
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            # Re-fill the padded tail, which was disturbed by the normalization above.
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None):
        """Apply utterance CMVN to each feature matrix, masking padded frames when a mask is given."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ):
        """Featurize (and optionally pad/normalize) one waveform or a batch of waveforms."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.'
                )
        else:
            __snake_case.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.'''
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({'''input_features''': features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get('''input_features''')
        if isinstance(input_features[0], list):
            padded_inputs['''input_features'''] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get('''attention_mask''')
        if attention_mask is not None:
            padded_inputs['''attention_mask'''] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs['''input_features'''] = self.normalize(
                padded_inputs['''input_features'''], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
49
"""Fine-tune a sequence-classification model on the XNLI dataset.

Fixes: both argument dataclasses were named `a` with every field also named
`a` (fields shadowed each other), while the parser referenced the undefined
`ModelArguments`/`DataTrainingArguments`; the entry point was named
`lowerCamelCase__` but the `__main__` guard called the undefined `main`;
the `metrics["..._samples"]` assignments had been erased to throwaway
locals; `training_args.fpaa` is not a real attribute (restored to `fp16`).
"""

import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        },
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    language: str = field(
        default=None, metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'}
    )
    train_language: Optional[str] = field(
        default=None, metadata={'help': 'Train language if it is different from the evaluation language.'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'},
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'},
    )


def main():
    """Parse args, load XNLI, fine-tune/evaluate/predict with `Trainer`."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("""run_xnli""", model_args)

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
        datefmt="""%m/%d/%Y %H:%M:%S""",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + f"""distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"""
    )
    logger.info(f"""Training/evaluation parameters {training_args}""")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                """Use --overwrite_output_dir to overcome."""
            )
        elif last_checkpoint is not None:
            logger.info(
                f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                """the `--output_dir` or add `--overwrite_output_dir` to train from scratch."""
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                """xnli""",
                model_args.language,
                split="""train""",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                """xnli""",
                model_args.train_language,
                split="""train""",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["""label"""].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            """xnli""",
            model_args.language,
            split="""validation""",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["""label"""].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            """xnli""",
            model_args.language,
            split="""test""",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["""label"""].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="""xnli""",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(""".ckpt""" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = """max_length"""
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["""premise"""],
            examples["""hypothesis"""],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="""train dataset map pre-processing"""):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="""Running tokenizer on train dataset""",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="""validation dataset map pre-processing"""):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="""Running tokenizer on validation dataset""",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="""prediction dataset map pre-processing"""):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="""Running tokenizer on prediction dataset""",
            )

    # Get the metric function
    metric = evaluate.load("""xnli""")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["""train_samples"""] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("""train""", metrics)
        trainer.save_metrics("""train""", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["""eval_samples"""] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("""eval""", metrics)
        trainer.save_metrics("""eval""", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("""*** Predict ***""")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="""predict""")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["""predict_samples"""] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("""predict""", metrics)
        trainer.save_metrics("""predict""", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, """predictions.txt""")
        if trainer.is_world_process_zero():
            with open(output_predict_file, """w""") as writer:
                writer.write("""index\tprediction\n""")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"""{index}\t{item}\n""")


if __name__ == "__main__":
    main()
114
0
"""Convert an OpenAI Whisper checkpoint to the Transformers format.

Fixes: all four helpers were named `lowerCamelCase_` (shadowing each other)
while call sites use their real names; the two module constants were both
assigned to `lowerCAmelCase_` while the code reads `_MODELS` and
`WHISPER_MAPPING`; every signature reused one parameter name (SyntaxError);
`hashlib.shaaaa` is not a real attribute (sha256); `decoder_attention_heads`
was wired to `n_text_state` instead of `n_text_head`; and `_download`
returns raw bytes which the converter then indexed like a loaded checkpoint.
"""

import argparse
import hashlib
import io
import os
import urllib.request
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration


# URL per checkpoint name; the SHA256 checksum is embedded in the URL path.
_MODELS = {
    """tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
    """tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
    """base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
    """base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
    """small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
    """small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
    """medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
    """medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
    """large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
    """large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}


def remove_ignore_keys_(state_dict):
    """Drop top-level bookkeeping entries that have no Transformers counterpart (in place)."""
    ignore_keys = ['layers', 'blocks']
    for k in ignore_keys:
        state_dict.pop(k, None)


# Substring renames from OpenAI parameter names to Transformers names.
WHISPER_MAPPING = {
    """blocks""": """layers""",
    """mlp.0""": """fc1""",
    """mlp.2""": """fc2""",
    """mlp_ln""": """final_layer_norm""",
    """.attn.query""": """.self_attn.q_proj""",
    """.attn.key""": """.self_attn.k_proj""",
    """.attn.value""": """.self_attn.v_proj""",
    """.attn_ln""": """.self_attn_layer_norm""",
    """.attn.out""": """.self_attn.out_proj""",
    """.cross_attn.query""": """.encoder_attn.q_proj""",
    """.cross_attn.key""": """.encoder_attn.k_proj""",
    """.cross_attn.value""": """.encoder_attn.v_proj""",
    """.cross_attn_ln""": """.encoder_attn_layer_norm""",
    """.cross_attn.out""": """.encoder_attn.out_proj""",
    """decoder.ln.""": """decoder.layer_norm.""",
    """encoder.ln.""": """encoder.layer_norm.""",
    """token_embedding""": """embed_tokens""",
    """encoder.positional_embedding""": """encoder.embed_positions.weight""",
    """decoder.positional_embedding""": """decoder.embed_positions.weight""",
    """ln_post""": """layer_norm""",
}


def rename_keys(s_dict):
    """Rename every state-dict key according to WHISPER_MAPPING (in place); returns the dict."""
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"""{key} -> {new_key}""")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the embedding's weights (for the tied LM head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str = ".") -> bytes:
    """Download `url` into `root` (verifying the SHA256 embedded in the URL) and return the raw bytes."""
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split('/')[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"""{download_target} exists and is not a regular file""")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, 'rb').read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""")

    with urllib.request.urlopen(url) as source, open(download_target, 'wb') as output:
        with tqdm(
            total=int(source.info().get('Content-Length')), ncols=80, unit='iB', unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, 'rb').read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.'
        )

    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Load an OpenAI checkpoint (by name or local `.pt` path) and save it in Transformers format."""
    if ".pt" not in checkpoint_path:
        # `checkpoint_path` is a model name: fetch the bytes, then load them as a torch checkpoint.
        model_bytes = _download(_MODELS[checkpoint_path])
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location='cpu')
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location='cpu')
    dimensions = original_checkpoint['dims']
    state_dict = original_checkpoint['model_state_dict']
    proj_out_weights = state_dict['decoder.token_embedding.weight']
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict['decoder.layers.0.fc1.weight'].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions['n_vocab'],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions['n_mels'],
        d_model=dimensions['n_audio_state'],
        max_target_positions=dimensions['n_text_ctx'],
        encoder_layers=dimensions['n_audio_layer'],
        encoder_attention_heads=dimensions['n_audio_head'],
        decoder_layers=dimensions['n_text_layer'],
        decoder_attention_heads=dimensions['n_text_head'],
        max_source_positions=dimensions['n_audio_ctx'],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f""" but all the following weights are missing {missing}"""
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
357
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase_ = { """configuration_pix2struct""": [ """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Pix2StructConfig""", """Pix2StructTextConfig""", """Pix2StructVisionConfig""", ], """processing_pix2struct""": ["""Pix2StructProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["""Pix2StructImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Pix2StructPreTrainedModel""", """Pix2StructForConditionalGeneration""", """Pix2StructVisionModel""", """Pix2StructTextModel""", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
260
0
import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py __lowercase = '''.''' if __name__ == "__main__": __lowercase = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''') __lowercase = [] __lowercase = [] with open(doctest_file_path) as fp: for line in fp: __lowercase = line.strip() __lowercase = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: __lowercase = '''\n'''.join(non_existent_paths) raise ValueError(F'`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}') if all_paths != sorted(all_paths): raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
43
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __lowercase = 16 __lowercase = 32 def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 16 , SCREAMING_SNAKE_CASE = "bert-base-cased" ): '''simple docstring''' __UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) __UpperCamelCase :Any = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) __UpperCamelCase :int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __UpperCamelCase :Tuple = datasets.map( SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=SCREAMING_SNAKE_CASE ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCamelCase :List[str] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(SCREAMING_SNAKE_CASE , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
__UpperCamelCase :Union[str, Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE ) __UpperCamelCase :Dict = DataLoader( tokenized_datasets['''validation'''] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCamelCase :int = config['''lr'''] __UpperCamelCase :str = int(config['''num_epochs'''] ) __UpperCamelCase :Any = int(config['''seed'''] ) __UpperCamelCase :Dict = int(config['''batch_size'''] ) __UpperCamelCase :Optional[Any] = args.model_name_or_path set_seed(SCREAMING_SNAKE_CASE ) __UpperCamelCase , __UpperCamelCase :Dict = get_dataloaders(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCamelCase :Any = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE ) # Instantiate optimizer __UpperCamelCase :List[str] = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __UpperCamelCase :Optional[Any] = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE ) if accelerator.state.deepspeed_plugin is not None: __UpperCamelCase :Dict = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: __UpperCamelCase :Dict = 1 __UpperCamelCase :Tuple = (len(SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in 
accelerator.state.deepspeed_plugin.deepspeed_config ): __UpperCamelCase :str = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE , ) else: __UpperCamelCase :Dict = DummyScheduler(SCREAMING_SNAKE_CASE , total_num_steps=SCREAMING_SNAKE_CASE , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = accelerator.prepare( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # We need to keep track of how many total steps we have iterated over __UpperCamelCase :List[Any] = 0 # We also need to keep track of the stating epoch so files are named properly __UpperCamelCase :Dict = 0 # Now we train the model __UpperCamelCase :Any = evaluate.load('''glue''' , '''mrpc''' ) __UpperCamelCase :Union[str, Any] = 0 __UpperCamelCase :Optional[int] = {} for epoch in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE ): __UpperCamelCase :Optional[Any] = model(**SCREAMING_SNAKE_CASE ) __UpperCamelCase :Tuple = outputs.loss __UpperCamelCase :str = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() __UpperCamelCase :Any = 0 for step, batch in enumerate(SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __UpperCamelCase :Any = model(**SCREAMING_SNAKE_CASE ) __UpperCamelCase :Optional[int] = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __UpperCamelCase , __UpperCamelCase :List[Any] = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(SCREAMING_SNAKE_CASE ) - 1: __UpperCamelCase :List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen] __UpperCamelCase :Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=SCREAMING_SNAKE_CASE , references=SCREAMING_SNAKE_CASE , ) __UpperCamelCase :Dict = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , SCREAMING_SNAKE_CASE ) __UpperCamelCase :str = eval_metric['''accuracy'''] if best_performance < eval_metric["accuracy"]: __UpperCamelCase :int = eval_metric['''accuracy'''] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}""" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def lowerCamelCase ( ): '''simple docstring''' __UpperCamelCase :Tuple = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=SCREAMING_SNAKE_CASE , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=SCREAMING_SNAKE_CASE , ) parser.add_argument( '''--output_dir''' , 
type=SCREAMING_SNAKE_CASE , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--performance_lower_bound''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , ) parser.add_argument( '''--num_epochs''' , type=SCREAMING_SNAKE_CASE , default=3 , help='''Number of train epochs.''' , ) __UpperCamelCase :List[str] = parser.parse_args() __UpperCamelCase :Tuple = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
43
1
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : Any , __magic_name__ : int ) -> Optional[Any]: """simple docstring""" UpperCamelCase :Any = 0 if start < end: UpperCamelCase :Any = randint(snake_case_ , snake_case_ ) UpperCamelCase :str = a[end] UpperCamelCase :Tuple = a[pivot] UpperCamelCase :Dict = temp UpperCamelCase , UpperCamelCase :List[Any] = _in_place_partition(snake_case_ , snake_case_ , snake_case_ ) count += _in_place_quick_sort(snake_case_ , snake_case_ , p - 1 ) count += _in_place_quick_sort(snake_case_ , p + 1 , snake_case_ ) return count def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Optional[int]: """simple docstring""" UpperCamelCase :str = 0 UpperCamelCase :Optional[Any] = randint(snake_case_ , snake_case_ ) UpperCamelCase :Optional[Any] = a[end] UpperCamelCase :int = a[pivot] UpperCamelCase :Optional[int] = temp UpperCamelCase :Any = start - 1 for index in range(snake_case_ , snake_case_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value UpperCamelCase :List[str] = new_pivot_index + 1 UpperCamelCase :Optional[int] = a[new_pivot_index] UpperCamelCase :Union[str, Any] = a[index] UpperCamelCase :Dict = temp UpperCamelCase :Tuple = a[new_pivot_index + 1] UpperCamelCase :Any = a[end] UpperCamelCase :List[str] = temp return new_pivot_index + 1, count UpperCAmelCase_ : List[str] = TemporaryFile() UpperCAmelCase_ : int = 1_00 # 1000 elements are to be sorted UpperCAmelCase_ : str = 0, 1 # mean and standard deviation UpperCAmelCase_ : List[str] = np.random.normal(mu, sigma, p) np.save(outfile, X) print('''The array is''') print(X) outfile.seek(0) # using the same array UpperCAmelCase_ : str = np.load(outfile) UpperCAmelCase_ : List[Any] = len(M) - 1 UpperCAmelCase_ : Dict = _in_place_quick_sort(M, 0, r) print( '''No of Comparisons for 
100 elements selected from a standard normal distribution''' '''is :''' ) print(z)
367
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase_ : Union[str, Any] = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Dict = [ '''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SwinForImageClassification''', '''SwinForMaskedImageModeling''', '''SwinModel''', '''SwinPreTrainedModel''', '''SwinBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Union[str, Any] = [ '''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFSwinForImageClassification''', '''TFSwinForMaskedImageModeling''', '''TFSwinModel''', '''TFSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys UpperCAmelCase_ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
62
0
'''simple docstring''' import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __UpperCamelCase ( unittest.TestCase ): @property def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ =UNetaDModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), ) return model def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =self.dummy_uncond_unet lowerCamelCase_ =ScoreSdeVeScheduler() lowerCamelCase_ =ScoreSdeVePipeline(unet=lowerCAmelCase, scheduler=lowerCAmelCase ) sde_ve.to(lowerCAmelCase ) sde_ve.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =torch.manual_seed(0 ) lowerCamelCase_ =sde_ve(num_inference_steps=2, output_type='''numpy''', generator=lowerCAmelCase ).images lowerCamelCase_ =torch.manual_seed(0 ) lowerCamelCase_ =sde_ve(num_inference_steps=2, output_type='''numpy''', generator=lowerCAmelCase, return_dict=lowerCAmelCase )[ 0 ] lowerCamelCase_ =image[0, -3:, -3:, -1] lowerCamelCase_ =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase_ =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class __UpperCamelCase ( unittest.TestCase ): def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''google/ncsnpp-church-256''' lowerCamelCase_ =UNetaDModel.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =ScoreSdeVeScheduler.from_pretrained(lowerCAmelCase ) lowerCamelCase_ =ScoreSdeVePipeline(unet=lowerCAmelCase, scheduler=lowerCAmelCase ) 
sde_ve.to(lowerCAmelCase ) sde_ve.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =torch.manual_seed(0 ) lowerCamelCase_ =sde_ve(num_inference_steps=10, output_type='''numpy''', generator=lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowerCamelCase_ =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
75
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _UpperCAmelCase ( ) -> Tuple: _lowerCAmelCase : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png""" _lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" ) return image def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Dict: _lowerCAmelCase : str = [] # fmt: off # vision encoder rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") ) rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") ) rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") ) rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") ) rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') ) 
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) ) rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') ) rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') ) # QFormer rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") ) rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") ) # fmt: on return rename_keys def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] ) -> Optional[Any]: _lowerCAmelCase : str = dct.pop(_lowerCamelCase ) _lowerCAmelCase : str = val def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ) -> Tuple: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases _lowerCAmelCase : Tuple = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' ) _lowerCAmelCase : Optional[Any] = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' ) # next, set bias in the state dict _lowerCAmelCase : int = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) ) _lowerCAmelCase : str = qkv_bias def _UpperCAmelCase ( 
_lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ) -> List[Any]: _lowerCAmelCase : str = 3_64 if """coco""" in model_name else 2_24 _lowerCAmelCase : str = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: _lowerCAmelCase : int = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=_lowerCamelCase ).to_dict() elif "opt-6.7b" in model_name: _lowerCAmelCase : Union[str, Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=_lowerCamelCase ).to_dict() elif "t5-xl" in model_name: _lowerCAmelCase : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _lowerCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict() _lowerCAmelCase : Dict = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase ) return config, image_size @torch.no_grad() def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=None , _lowerCamelCase : int=False ) -> List[str]: _lowerCAmelCase : int = ( AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" ) if """opt""" in model_name else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" ) ) _lowerCAmelCase : List[Any] = tokenizer("""\n""" , add_special_tokens=_lowerCamelCase ).input_ids[0] _lowerCAmelCase , _lowerCAmelCase : List[str] = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase ) _lowerCAmelCase : Optional[int] = BlipaForConditionalGeneration(_lowerCamelCase ).eval() _lowerCAmelCase : Union[str, Any] = { """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""), """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""), """blip2-opt-2.7b-coco""": ("""blip2_opt""", 
"""caption_coco_opt2.7b"""), """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""), """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""), """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""), """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""), } _lowerCAmelCase , _lowerCAmelCase : List[str] = model_name_to_original[model_name] # load original model print("""Loading original model...""" ) _lowerCAmelCase : Dict = """cuda""" if torch.cuda.is_available() else """cpu""" _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_model_and_preprocess( name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase ) original_model.eval() print("""Done!""" ) # update state dict keys _lowerCAmelCase : List[Any] = original_model.state_dict() _lowerCAmelCase : Optional[int] = create_rename_keys(_lowerCamelCase ) for src, dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _lowerCAmelCase : Tuple = state_dict.pop(_lowerCamelCase ) if key.startswith("""Qformer.bert""" ): _lowerCAmelCase : List[Any] = key.replace("""Qformer.bert""" , """qformer""" ) if "attention.self" in key: _lowerCAmelCase : Optional[int] = key.replace("""self""" , """attention""" ) if "opt_proj" in key: _lowerCAmelCase : Dict = key.replace("""opt_proj""" , """language_projection""" ) if "t5_proj" in key: _lowerCAmelCase : Tuple = key.replace("""t5_proj""" , """language_projection""" ) if key.startswith("""opt""" ): _lowerCAmelCase : List[Any] = key.replace("""opt""" , """language""" ) if key.startswith("""t5""" ): _lowerCAmelCase : int = key.replace("""t5""" , """language""" ) _lowerCAmelCase : Tuple = val # read in qv biases read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase : Optional[int] = hf_model.load_state_dict(_lowerCamelCase , 
strict=_lowerCamelCase ) assert len(_lowerCamelCase ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] _lowerCAmelCase : Union[str, Any] = load_demo_image() _lowerCAmelCase : Optional[int] = vis_processors["""eval"""](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase ) _lowerCAmelCase : List[str] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase ) # create processor _lowerCAmelCase : Optional[int] = BlipImageProcessor( size={"""height""": image_size, """width""": image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase ) _lowerCAmelCase : Tuple = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase ) _lowerCAmelCase : Any = processor(images=_lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(_lowerCamelCase ) # make sure processor creates exact same pixel values assert torch.allclose(_lowerCamelCase , _lowerCamelCase ) original_model.to(_lowerCamelCase ) hf_model.to(_lowerCamelCase ) with torch.no_grad(): if "opt" in model_name: _lowerCAmelCase : Optional[Any] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits _lowerCAmelCase : Optional[Any] = hf_model(_lowerCamelCase , _lowerCamelCase ).logits else: _lowerCAmelCase : List[Any] = original_model( {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits _lowerCAmelCase : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) _lowerCAmelCase : Dict = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits assert original_logits.shape == logits.shape print("""First values of original logits:""" , original_logits[0, :3, :3] ) print("""First values of HF logits:""" , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": _lowerCAmelCase : Any = torch.tensor( [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase ) assert torch.allclose(logits[0, :3, :3] 
, _lowerCamelCase , atol=1e-4 ) elif model_name == "blip2-flan-t5-xl-coco": _lowerCAmelCase : List[Any] = torch.tensor( [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase ) else: # cast to same type _lowerCAmelCase : Union[str, Any] = logits.dtype assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1e-2 ) print("""Looks ok!""" ) print("""Generating a caption...""" ) _lowerCAmelCase : Optional[int] = """""" _lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase ) _lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} ) _lowerCAmelCase : Dict = hf_model.generate( _lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print("""Original generation:""" , _lowerCamelCase ) _lowerCAmelCase : int = input_ids.shape[1] _lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase ) _lowerCAmelCase : List[str] = [text.strip() for text in output_text] print("""HF generation:""" , _lowerCamelCase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_lowerCamelCase ) hf_model.save_pretrained(_lowerCamelCase ) if push_to_hub: processor.push_to_hub(f'nielsr/{model_name}' ) hf_model.push_to_hub(f'nielsr/{model_name}' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() UpperCamelCase_ = [ """blip2-opt-2.7b""", """blip2-opt-6.7b""", """blip2-opt-2.7b-coco""", """blip2-opt-6.7b-coco""", """blip2-flan-t5-xl""", """blip2-flan-t5-xl-coco""", """blip2-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""blip2-opt-2.7b""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, 
help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) UpperCamelCase_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
309
0
from __future__ import annotations def a( A : list[float] ) -> float: """simple docstring""" a = 0.00 a = 0 for resistor in resistors: if resistor <= 0: a = f'''Resistor at index {index} has a negative or zero value!''' raise ValueError(_lowerCamelCase ) first_sum += 1 / float(_lowerCamelCase ) index += 1 return 1 / first_sum def a( A : list[float] ) -> float: """simple docstring""" a = 0.00 a = 0 for resistor in resistors: sum_r += resistor if resistor < 0: a = f'''Resistor at index {index} has a negative value!''' raise ValueError(_lowerCamelCase ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
370
"""Tests for the RAG tokenizer (a DPR question-encoder tokenizer paired with a BART generator tokenizer)."""
import json
import os
import shutil
import tempfile
from unittest import TestCase

from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available

# RAG classes are only importable when their optional backends are installed.
if is_torch_available() and is_datasets_available() and is_faiss_available():
    from transformers.models.rag.configuration_rag import RagConfig
    from transformers.models.rag.tokenization_rag import RagTokenizer


# NOTE(review): this file looks machine-mangled and cannot run as written — the
# base class `lowerCAmelCase` is undefined (presumably TestCase), every method
# shares the name `UpperCamelCase_` (each redefinition shadows the previous),
# and assignment targets were rewritten to a bare `a` while later reads still
# use the original names (`self.tmpdirname`, `vocab_tokens`, `lowerCamelCase_`,
# ...). Documented as-is; reconstruct from the upstream RAG tokenizer test
# before making behavioral changes.
@require_faiss
@require_torch
class _lowercase(lowerCAmelCase):
    """RAG tokenizer tests: fixture setup, save/load round-trip, and pretrained smoke tests."""

    def UpperCamelCase_(self):
        """Write throwaway DPR (WordPiece) and BART (BPE) tokenizer files under a temp dir."""
        a = tempfile.mkdtemp()
        a = 8
        # DPR tok
        a = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        a = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(lowerCamelCase_, exist_ok=lowerCamelCase_)
        a = os.path.join(lowerCamelCase_, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        # BART tok
        a = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        a = dict(zip(lowerCamelCase_, range(len(lowerCamelCase_))))
        a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        a = {"unk_token": "<unk>"}
        a = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(lowerCamelCase_, exist_ok=lowerCamelCase_)
        a = os.path.join(lowerCamelCase_, BART_VOCAB_FILES_NAMES["vocab_file"])
        a = os.path.join(lowerCamelCase_, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(lowerCamelCase_) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(lowerCamelCase_))

    def UpperCamelCase_(self):
        """Load the DPR question-encoder tokenizer written by the fixture above."""
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def UpperCamelCase_(self):
        """Load the BART tokenizer written by the fixture above."""
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def UpperCamelCase_(self):
        """Remove the temporary tokenizer directory."""
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def UpperCamelCase_(self):
        """Round-trip a RagTokenizer through save_pretrained/from_pretrained and compare vocabs."""
        a = os.path.join(self.tmpdirname, "rag_tokenizer")
        a = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        a = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(lowerCamelCase_)
        rag_tokenizer.save_pretrained(lowerCamelCase_)
        a = RagTokenizer.from_pretrained(lowerCamelCase_, config=lowerCamelCase_)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, lowerCamelCase_)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, lowerCamelCase_)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())

    @slow
    def UpperCamelCase_(self):
        """Smoke-test batch tokenization with the pretrained facebook/rag-token-nq tokenizer."""
        a = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        a = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        a = tokenizer(lowerCamelCase_)
        self.assertIsNotNone(lowerCamelCase_)

    @slow
    def UpperCamelCase_(self):
        """Smoke-test batch tokenization with the pretrained facebook/rag-sequence-nq tokenizer."""
        a = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        a = [
            "who got the first nobel prize in physics",
            "when is the next deadpool movie being released",
            "which mode is used for short wave broadcast service",
            "who is the owner of reading football club",
            "when is the next scandal episode coming out",
            "when is the last time the philadelphia won the superbowl",
            "what is the most current adobe flash player version",
            "how many episodes are there in dragon ball z",
            "what is the first step in the evolution of the eye",
            "where is gall bladder situated in human body",
            "what is the main mineral in lithium batteries",
            "who is the president of usa right now",
            "where do the greasers live in the outsiders",
            "panda is a national animal of which country",
            "what is the name of manchester united stadium",
        ]
        a = tokenizer(lowerCamelCase_)
        self.assertIsNotNone(lowerCamelCase_)
71
0
"""simple docstring""" import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin lowerCamelCase_ : Dict = ''' Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] ''' class __A ( unittest.TestCase, snake_case_ ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a =load_tool('''text-question-answering''' ) self.tool.setup() a =load_tool('''text-question-answering''' , remote=__snake_case ) def SCREAMING_SNAKE_CASE ( self ) -> int: a =self.tool(__snake_case , '''What did Hugging Face do in April 2021?''' ) self.assertEqual(__snake_case , '''launched the BigScience Research Workshop''' ) def SCREAMING_SNAKE_CASE ( self ) -> Any: a =self.remote_tool(__snake_case , '''What did Hugging Face do in April 2021?''' ) self.assertEqual(__snake_case , '''launched the BigScience Research Workshop''' ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a =self.tool(text=__snake_case , question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(__snake_case , '''launched the BigScience Research Workshop''' ) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a =self.remote_tool(text=__snake_case , question='''What did Hugging Face do in April 2021?''' ) self.assertEqual(__snake_case , '''launched the BigScience Research Workshop''' )
81
"""Project Euler problem 46 ("Goldbach's other conjecture"): find the smallest
odd composite that cannot be written as the sum of a prime and twice a square.

Reconstructed from a machine-mangled version in which all three functions
shared one name, bodies referenced unbound locals, and the input check read
``isinstance(n, n)`` instead of ``isinstance(n, int)``.
"""
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Return True if *number* is prime (6k +/- 1 trial division)."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


# All odd composite numbers below 100001 — the candidates for the conjecture.
odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first *n* odd composites not expressible as prime + 2*i**2.

    Args:
        n: how many counterexamples to collect (must be a positive int).

    Raises:
        ValueError: if *n* is not an int or is <= 0.
    """
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        # Search for a decomposition odd_composites[num] = prime + 2*i*i.
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            # Loop exhausted without a break: no decomposition exists, so this
            # composite is a counterexample to the conjecture.
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Return the smallest odd composite with no prime + 2*square decomposition."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
70
0
'''Convert a fairseq SEW (Squeezed and Efficient Wav2vec) checkpoint into the
Hugging Face Transformers format (SEWModel, or SEWForCTC for fine-tuned models).'''
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

# Register SEW's fairseq modules
from sew_asapp import tasks  # noqa: F401

from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
    logging,
)

logging.set_verbosity_info()
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)

# Maps fairseq state-dict key fragments to HF module paths ("*" stands for the
# encoder layer index and is substituted in recursively_load_weights).
lowerCamelCase :Dict = {
    """post_extract_proj""": """feature_projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.upsample.0""": """encoder.upsample.projection""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """layer_norm""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
}


# NOTE(review): this file is machine-mangled and cannot run as written — every
# function is named `a` (later defs shadow earlier ones), assignment targets
# were rewritten to `A_` (with typing annotations whose names are never
# imported), and the bodies read the original local names (`key`, `hf_pointer`,
# `value`, `fs_config`, ...), which are unbound. It is documented as-is and
# needs reconstruction from the upstream
# convert_sew_original_pytorch_checkpoint_to_pytorch.py before use.
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''Copy one fairseq tensor into the HF sub-module addressed by a dotted key,
    optionally into a specific slot (weight / weight_g / weight_v / bias),
    asserting that the shapes match first.'''
    for attribute in key.split(""".""" ):
        A_ : Union[str, Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
    if weight_type is not None:
        A_ : List[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
    else:
        A_ : Optional[int] = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        A_ : Optional[int] = value
    elif weight_type == "weight_g":
        A_ : Optional[Any] = value
    elif weight_type == "weight_v":
        A_ : Dict = value
    elif weight_type == "bias":
        A_ : str = value
    else:
        A_ : List[str] = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''Walk the fairseq state dict and route every tensor either into the conv
    feature extractor (via load_conv_layer) or, through MAPPING, into the
    corresponding HF sub-module; warn about anything left unmatched.'''
    A_ : int = []
    A_ : Union[str, Any] = fairseq_model.state_dict()
    # Fine-tuned (CTC) models wrap the base model under a `sew.` prefix.
    A_ : List[Any] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        A_ : Union[str, Any] = False
        if "conv_layers" in name:
            load_conv_layer(
                lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == """group""" , )
            A_ : str = True
        else:
            for key, mapped_key in MAPPING.items():
                A_ : Tuple = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    A_ : Any = True
                    if "*" in mapped_key:
                        # Substitute the encoder layer index extracted from the fairseq key.
                        A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
                        A_ : Optional[int] = mapped_key.replace("""*""" , lowerCamelCase__ )
                    if "weight_g" in name:
                        A_ : Any = """weight_g"""
                    elif "weight_v" in name:
                        A_ : Any = """weight_v"""
                    elif "weight" in name:
                        A_ : Dict = """weight"""
                    elif "bias" in name:
                        A_ : Dict = """bias"""
                    else:
                        A_ : Tuple = None
                    set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
                continue
        if not is_used:
            unused_weights.append(lowerCamelCase__ )
    logger.warning(f'Unused weights: {unused_weights}' )


def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
    '''Copy one conv-feature-extractor tensor (conv weight/bias, type_id == 0,
    or layer-norm weight/bias, type_id == 2) into the HF feature extractor,
    asserting shapes; anything else is recorded as unused.'''
    A_ : Optional[int] = full_name.split("""conv_layers.""" )[-1]
    A_ : List[str] = name.split(""".""" )
    A_ : int = int(items[0] )
    A_ : str = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            A_ : List[Any] = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            A_ : str = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # Layer norms exist on every conv layer in "layer" mode, but only on
        # the first conv layer in "group" (group-norm) mode.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            A_ : Tuple = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            A_ : List[Any] = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(lowerCamelCase__ )


def a ( lowerCamelCase__ , lowerCamelCase__ ):
    '''Build a SEWConfig from the fairseq model config, applying the Wav2VecCtc
    overrides (dropouts, masking, vocab) when the model is fine-tuned.'''
    A_ : Dict = SEWConfig()
    if is_finetuned:
        A_ : str = model.wav_encoder.wav_model.cfg
    else:
        A_ : List[Any] = model.cfg
    A_ : int = fs_config.conv_bias
    # NOTE(review): eval() of a config-provided string — acceptable only for
    # trusted local checkpoints; never run this on untrusted input.
    A_ : List[str] = eval(fs_config.conv_feature_layers )
    A_ : Tuple = [x[0] for x in conv_layers]
    A_ : int = [x[1] for x in conv_layers]
    A_ : Optional[int] = [x[2] for x in conv_layers]
    A_ : List[Any] = """gelu"""
    A_ : Any = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group"""
    A_ : Optional[Any] = 0.0
    A_ : List[Any] = fs_config.activation_fn.name
    A_ : Optional[int] = fs_config.encoder_embed_dim
    A_ : List[Any] = 0.02
    A_ : Tuple = fs_config.encoder_ffn_embed_dim
    A_ : str = 1E-5
    A_ : Any = fs_config.encoder_layerdrop
    A_ : Optional[Any] = fs_config.encoder_attention_heads
    A_ : Optional[int] = fs_config.conv_pos_groups
    A_ : Optional[Any] = fs_config.conv_pos
    A_ : str = len(lowerCamelCase__ )
    A_ : Any = fs_config.encoder_layers
    A_ : Dict = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        A_ : Optional[Any] = model.cfg
        A_ : List[str] = fs_config.final_dropout
        A_ : Tuple = fs_config.layerdrop
        A_ : List[str] = fs_config.activation_dropout
        A_ : List[Any] = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
        A_ : str = fs_config.attention_dropout
        A_ : Optional[Any] = fs_config.dropout_input
        A_ : Optional[Any] = fs_config.dropout
        A_ : Dict = fs_config.mask_channel_length
        A_ : List[str] = fs_config.mask_channel_prob
        A_ : Tuple = fs_config.mask_length
        A_ : Union[str, Any] = fs_config.mask_prob
    A_ : str = """Wav2Vec2FeatureExtractor"""
    A_ : str = """Wav2Vec2CTCTokenizer"""
    return config


@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ):
    '''Load the fairseq checkpoint, derive (or load) the HF config, build the
    feature extractor / tokenizer / processor, copy the weights over, and save
    everything to the dump folder.'''
    if is_finetuned:
        A_, A_, A_ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        A_, A_, A_ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    if config_path is not None:
        A_ : Any = SEWConfig.from_pretrained(lowerCamelCase__ )
    else:
        A_ : Optional[Any] = convert_config(model[0] , lowerCamelCase__ )
    A_ : Optional[Any] = model[0].eval()
    A_ : Dict = True if config.feat_extract_norm == """layer""" else False
    A_ : Any = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
    if is_finetuned:
        if dict_path:
            A_ : List[Any] = Dictionary.load(lowerCamelCase__ )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            A_ : Dict = target_dict.pad_index
            A_ : Dict = target_dict.bos_index
            A_ : str = target_dict.pad_index
            A_ : Optional[int] = target_dict.bos_index
            A_ : str = target_dict.eos_index
            A_ : Optional[Any] = len(target_dict.symbols )
            A_ : List[Any] = os.path.join(lowerCamelCase__ , """vocab.json""" )
            if not os.path.isdir(lowerCamelCase__ ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCamelCase__ ) )
                return
            os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
            with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(target_dict.indices , lowerCamelCase__ )
            A_ : Union[str, Any] = WavaVecaCTCTokenizer(
                lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCamelCase__ , )
            A_ : List[Any] = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
            processor.save_pretrained(lowerCamelCase__ )
        A_ : Tuple = SEWForCTC(lowerCamelCase__ )
    else:
        A_ : Tuple = SEWModel(lowerCamelCase__ )
        feature_extractor.save_pretrained(lowerCamelCase__ )
    recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    hf_model.save_pretrained(lowerCamelCase__ )


if __name__ == "__main__":
    lowerCamelCase :Dict = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    lowerCamelCase :Any = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
352
'''Tests for the ViT model: a hyper-parameter/input builder, the common
ModelTester suite, and slow integration tests against pretrained checkpoints.'''
import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


# NOTE(review): this file is machine-mangled and cannot run as written — all
# three classes share the name `_lowerCAmelCase` (later defs shadow earlier
# ones), parameters are uniformly renamed `lowercase`, and `A_ : ... = ...`
# assignments lost their real targets while reads keep the original names
# (`self.batch_size`, `config_and_inputs`, ...). Documented as-is; reconstruct
# from the upstream test_modeling_vit.py before changing behavior.
class _lowerCAmelCase:
    '''Holds model hyper-parameters and builds configs/inputs for the ViT tests.'''

    def __init__(self, lowercase, lowercase=13, lowercase=30, lowercase=2, lowercase=3, lowercase=True, lowercase=True, lowercase=32, lowercase=5, lowercase=4, lowercase=37, lowercase="gelu", lowercase=0.1, lowercase=0.1, lowercase=10, lowercase=0.02, lowercase=None, lowercase=2, ):
        A_ : List[str] = parent
        A_ : str = batch_size
        A_ : Optional[Any] = image_size
        A_ : List[str] = patch_size
        A_ : List[str] = num_channels
        A_ : List[str] = is_training
        A_ : str = use_labels
        A_ : List[str] = hidden_size
        A_ : List[Any] = num_hidden_layers
        A_ : Any = num_attention_heads
        A_ : Any = intermediate_size
        A_ : Optional[Any] = hidden_act
        A_ : Optional[int] = hidden_dropout_prob
        A_ : str = attention_probs_dropout_prob
        A_ : Optional[int] = type_sequence_label_size
        A_ : Any = initializer_range
        A_ : int = scope
        A_ : str = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        A_ : Dict = (image_size // patch_size) ** 2
        A_ : List[str] = num_patches + 1

    def _a (self ):
        # Build pixel values (and labels, when used) plus a matching config.
        A_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        A_ : Optional[Any] = None
        if self.use_labels:
            A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        A_ : Union[str, Any] = self.get_config()
        return config, pixel_values, labels

    def _a (self ):
        # Translate the tester's hyper-parameters into a ViTConfig.
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    def _a (self , lowercase , lowercase , lowercase ):
        # Forward through the bare ViTModel and check the hidden-state shape.
        A_ : List[str] = ViTModel(config=lowercase )
        model.to(lowercase )
        model.eval()
        A_ : List[str] = model(lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _a (self , lowercase , lowercase , lowercase ):
        # Check reconstruction shapes for masked image modeling (RGB and greyscale).
        A_ : List[str] = ViTForMaskedImageModeling(config=lowercase )
        model.to(lowercase )
        model.eval()
        A_ : Tuple = model(lowercase )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        A_ : Union[str, Any] = 1
        A_ : Any = ViTForMaskedImageModeling(lowercase )
        model.to(lowercase )
        model.eval()
        A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        A_ : Optional[int] = model(lowercase )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def _a (self , lowercase , lowercase , lowercase ):
        # Check logits shapes for image classification (RGB and greyscale).
        A_ : Dict = self.type_sequence_label_size
        A_ : str = ViTForImageClassification(lowercase )
        model.to(lowercase )
        model.eval()
        A_ : List[str] = model(lowercase , labels=lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        A_ : Any = 1
        A_ : str = ViTForImageClassification(lowercase )
        model.to(lowercase )
        model.eval()
        A_ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        A_ : Union[str, Any] = model(lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def _a (self ):
        # Adapt prepare_config_and_inputs() to the (config, inputs_dict) shape
        # expected by the common ModelTester machinery.
        A_ : str = self.prepare_config_and_inputs()
        ((A_), (A_), (A_),) : Optional[int] = config_and_inputs
        A_ : Tuple = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
    '''Runs the shared ModelTester / pipeline suites over the ViT model classes.'''

    # NOTE(review): all class attributes below share the mangled name
    # `__SCREAMING_SNAKE_CASE`, so only the last assignment survives; the mixin
    # bases `__UpperCAmelCase` are undefined as written.
    __SCREAMING_SNAKE_CASE : Any = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    __SCREAMING_SNAKE_CASE : Union[str, Any] = (
        {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    __SCREAMING_SNAKE_CASE : Union[str, Any] = True
    __SCREAMING_SNAKE_CASE : Dict = False
    __SCREAMING_SNAKE_CASE : Optional[int] = False
    __SCREAMING_SNAKE_CASE : Optional[int] = False

    def _a (self ):
        # Set up the model tester and the config tester.
        A_ : Any = ViTModelTester(self )
        A_ : str = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )

    def _a (self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""ViT does not use inputs_embeds""" )
    def _a (self ):
        pass

    def _a (self ):
        # Input embeddings must be an nn.Module; output embeddings a Linear (or None).
        A_, A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A_ : Union[str, Any] = model_class(lowercase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            A_ : Tuple = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )

    def _a (self ):
        # The first positional argument of forward() must be `pixel_values`.
        A_, A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A_ : Optional[Any] = model_class(lowercase )
            A_ : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A_ : List[str] = [*signature.parameters.keys()]
            A_ : Dict = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowercase )

    def _a (self ):
        A_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase )

    def _a (self ):
        A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*lowercase )

    def _a (self ):
        A_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase )

    @slow
    def _a (self ):
        # Loading a pretrained checkpoint should succeed.
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A_ : Dict = ViTModel.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )


def a ( ):
    '''Load the standard COCO cats fixture image used by the integration tests.'''
    A_ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    '''Slow integration tests against pretrained ViT / DINO checkpoints.'''

    @cached_property
    def _a (self ):
        # Image processor used by the integration tests (None without vision deps).
        return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None

    @slow
    def _a (self ):
        # Classification head: compare the first logits against reference values.
        A_ : Optional[int] = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(lowercase )
        A_ : List[str] = self.default_image_processor
        A_ : Tuple = prepare_img()
        A_ : int = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
        # forward pass
        with torch.no_grad():
            A_ : str = model(**lowercase )
        # verify the logits
        A_ : Dict = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowercase )
        A_ : str = torch.tensor([-0.27_44, 0.82_15, -0.08_36] ).to(lowercase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )

    @slow
    def _a (self ):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        A_ : Optional[int] = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(lowercase )
        A_ : List[Any] = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=480 )
        A_ : Dict = prepare_img()
        A_ : str = image_processor(images=lowercase , return_tensors="""pt""" )
        A_ : int = inputs.pixel_values.to(lowercase )
        # forward pass
        with torch.no_grad():
            A_ : int = model(lowercase , interpolate_pos_encoding=lowercase )
        # verify the logits
        A_ : int = torch.Size((1, 3601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape , lowercase )
        A_ : List[Any] = torch.tensor(
            [[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]] ).to(lowercase )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase , atol=1E-4 ) )

    @slow
    @require_accelerate
    @require_torch_gpu
    def _a (self ):
        # fp16 inference with accelerate device mapping should run without error.
        A_ : List[Any] = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" )
        A_ : int = self.default_image_processor
        A_ : Any = prepare_img()
        A_ : List[str] = image_processor(images=lowercase , return_tensors="""pt""" )
        A_ : Any = inputs.pixel_values.to(lowercase )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            A_ : Optional[Any] = model(lowercase )
135
0
'''Feature extractor for TVLT: converts raw audio into padded log-mel
spectrogram features ("audio_values") plus a patch-level "audio_mask".'''
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging

UpperCAmelCase_ = logging.get_logger(__name__)


# NOTE(review): machine-mangled and non-runnable as written — the base class
# `lowerCamelCase_` (presumably SequenceFeatureExtractor) is undefined, the
# `UpperCAmelCase__ = ...` assignments lost their real targets while reads use
# the original attribute names (`self.patch_size`, `self.mel_filters`, ...),
# and the signature annotations (`Dict`, `Tuple`, ...) are largely unimported.
# Reconstruct from the upstream TVLT feature extractor before changing behavior.
class lowerCAmelCase_ ( lowerCamelCase_ ):
    '''Extracts log-mel spectrogram patches and an attention mask from raw speech.'''

    lowerCAmelCase_ : Optional[Any] = ["""audio_values""", """audio_mask"""]

    def __init__( self : List[str] , _UpperCAmelCase : List[str]=20_48 , _UpperCAmelCase : Dict=1 , _UpperCAmelCase : Tuple=[16, 16] , _UpperCAmelCase : Optional[int]=1_28 , _UpperCAmelCase : Tuple=4_41_00 , _UpperCAmelCase : Optional[Any]=86 , _UpperCAmelCase : Optional[int]=20_48 , _UpperCAmelCase : List[Any]=0.0 , **_UpperCAmelCase : Tuple , ):
        '''Configure spectrogram geometry and precompute the slaney mel filter bank.'''
        super().__init__(
            feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase , )
        UpperCAmelCase__ = spectrogram_length
        UpperCAmelCase__ = num_channels
        UpperCAmelCase__ = patch_size
        # Number of frequency patches per time step.
        UpperCAmelCase__ = feature_size // self.patch_size[1]
        UpperCAmelCase__ = n_fft
        UpperCAmelCase__ = sampling_rate // hop_length_to_sampling_rate
        UpperCAmelCase__ = sampling_rate
        UpperCAmelCase__ = padding_value
        # Transposed slaney-normalized mel filter bank, applied in _np_extract_fbank_features.
        UpperCAmelCase__ = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_UpperCAmelCase , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=_UpperCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , ).T

    def SCREAMING_SNAKE_CASE__ ( self : Any , _UpperCAmelCase : np.array ):
        '''Compute a dB-scaled log-mel spectrogram of one waveform and rescale it into [-1, 1].'''
        UpperCAmelCase__ = spectrogram(
            _UpperCAmelCase , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
        # Drop the last frame, then rescale dB values from [-100, -20] into [-1, 1].
        UpperCAmelCase__ = log_spec[:, :-1]
        UpperCAmelCase__ = log_spec - 20.0
        UpperCAmelCase__ = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__( self : int , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[bool] = True , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , **_UpperCAmelCase : Tuple , ):
        '''Featurize raw speech (single waveform or batch) into padded
        "audio_values" — and, when requested, a patch-level "audio_mask" —
        returned as a BatchFeature.'''
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    f''' with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        UpperCAmelCase__ = isinstance(_UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        UpperCAmelCase__ = is_batched_numpy or (
            isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            UpperCAmelCase__ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ):
            UpperCAmelCase__ = np.asarray(_UpperCAmelCase , dtype=np.floataa )
        elif isinstance(_UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            UpperCAmelCase__ = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            UpperCAmelCase__ = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        UpperCAmelCase__ = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , _UpperCAmelCase ):
            UpperCAmelCase__ = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        UpperCAmelCase__ = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1 for real patches, 0 for padding patches, per example.
            UpperCAmelCase__ = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            UpperCAmelCase__ = np.array(_UpperCAmelCase ).astype(np.floataa )
        # convert into correct format for padding
        UpperCAmelCase__ = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        UpperCAmelCase__ = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        UpperCAmelCase__ = padded_audio_features * self.padding_value
        for i in range(len(_UpperCAmelCase ) ):
            UpperCAmelCase__ = audio_features[i]
            UpperCAmelCase__ = feature
        # return as BatchFeature
        if return_attention_mask:
            UpperCAmelCase__ = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            UpperCAmelCase__ = {"""audio_values""": padded_audio_features}
        UpperCAmelCase__ = BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
        return encoded_inputs
346
'''simple docstring''' import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase_ ( lowerCamelCase_ ): '''simple docstring''' lowerCAmelCase_ : Union[str, Any] = (PNDMScheduler,) lowerCAmelCase_ : Optional[int] = (("""num_inference_steps""", 50),) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = { """num_train_timesteps""": 10_00, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**_UpperCAmelCase ) return config def SCREAMING_SNAKE_CASE__ ( self : List[Any] , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = 
scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" pass def SCREAMING_SNAKE_CASE__ ( self : int , _UpperCAmelCase : Union[str, Any]=0 , **_UpperCAmelCase : Optional[int] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class.from_pretrained(_UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(_UpperCAmelCase ) # copy over dummy past residual (must be after setting timesteps) UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = new_scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = 
new_scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE__ ( self : int , **_UpperCAmelCase : Tuple ): """simple docstring""" UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(**_UpperCAmelCase ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = 10 UpperCAmelCase__ = self.dummy_model() UpperCAmelCase__ = self.dummy_sample_deter scheduler.set_timesteps(_UpperCAmelCase ) for i, t in enumerate(scheduler.prk_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): UpperCAmelCase__ = model(_UpperCAmelCase , _UpperCAmelCase ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" UpperCAmelCase__ = dict(self.forward_default_kwargs ) UpperCAmelCase__ = kwargs.pop("""num_inference_steps""" , _UpperCAmelCase ) for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample if num_inference_steps is not None and hasattr(_UpperCAmelCase , """set_timesteps""" ): scheduler.set_timesteps(_UpperCAmelCase ) elif num_inference_steps is not None and not hasattr(_UpperCAmelCase , """set_timesteps""" ): UpperCAmelCase__ = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) UpperCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] UpperCAmelCase__ = dummy_past_residuals[:] UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 0 , 
_UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 0 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample UpperCAmelCase__ = scheduler.step_plms(_UpperCAmelCase , 1 , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" for timesteps in [1_00, 10_00]: self.check_over_configs(num_train_timesteps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_UpperCAmelCase ) UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config(steps_offset=1 ) UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_01, 8_51, 8_51, 8_01, 8_01, 7_51, 7_51, 7_01, 7_01, 6_51, 6_51, 6_01, 6_01, 5_01, 4_01, 3_01, 2_01, 1_01, 1] ) , ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" for t in [1, 5, 10]: self.check_over_forward(time_step=_UpperCAmelCase ) 
def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ): self.check_over_forward(num_inference_steps=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" UpperCAmelCase__ = 27 for scheduler_class in self.scheduler_classes: UpperCAmelCase__ = self.dummy_sample UpperCAmelCase__ = 0.1 * sample UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.set_timesteps(_UpperCAmelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): UpperCAmelCase__ = scheduler.step_prk(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ = self.scheduler_classes[0] UpperCAmelCase__ = self.get_scheduler_config() UpperCAmelCase__ = scheduler_class(**_UpperCAmelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" UpperCAmelCase__ = self.full_loop() UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1E-2 assert abs(result_mean.item() - 0.2580 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Dict ): """simple docstring""" UpperCAmelCase__ = self.full_loop(prediction_type="""v_prediction""" ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1E-2 assert abs(result_mean.item() - 0.0878 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = 
torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1E-2 assert abs(result_mean.item() - 0.2995 ) < 1E-3 def SCREAMING_SNAKE_CASE__ ( self : int ): """simple docstring""" UpperCAmelCase__ = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 ) UpperCAmelCase__ = torch.sum(torch.abs(_UpperCAmelCase ) ) UpperCAmelCase__ = torch.mean(torch.abs(_UpperCAmelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1E-2 assert abs(result_mean.item() - 0.2434 ) < 1E-3
346
1
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class lowercase : def __init__( self : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Tuple=13 , _UpperCamelCase : int=7 , _UpperCamelCase : Optional[Any]=True , _UpperCamelCase : Any=True , _UpperCamelCase : Dict=99 , _UpperCamelCase : Union[str, Any]=32 , _UpperCamelCase : List[str]=5 , _UpperCamelCase : int=4 , _UpperCamelCase : List[Any]=37 , _UpperCamelCase : Tuple="gelu" , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : List[str]=50 , _UpperCamelCase : Tuple=0.0_2 , _UpperCamelCase : List[Any]=True , _UpperCamelCase : List[str]=None , ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_input_mask SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_act SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = initializer_range SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = scope def __snake_case( self : Tuple ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) 
SCREAMING_SNAKE_CASE = None if self.use_input_mask: SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, input_mask, token_labels def __snake_case( self : List[Any] ) -> Any: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , ) def __snake_case( self : Tuple ) -> Dict: '''simple docstring''' ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def __snake_case( self : int , _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , **_UpperCamelCase : List[Any] , ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = BertGenerationEncoder(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(_UpperCamelCase , attention_mask=_UpperCamelCase ) SCREAMING_SNAKE_CASE = model(_UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __snake_case( 
self : Any , _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] , **_UpperCamelCase : str , ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = BertGenerationEncoder(config=_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , ) SCREAMING_SNAKE_CASE = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __snake_case( self : int , _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , **_UpperCamelCase : Dict , ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = BertGenerationDecoder(config=_UpperCamelCase ).to(_UpperCamelCase ).eval() # first forward pass SCREAMING_SNAKE_CASE = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase , ) SCREAMING_SNAKE_CASE = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE = torch.cat([input_mask, next_mask] , dim=-1 ) SCREAMING_SNAKE_CASE = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , 
encoder_attention_mask=_UpperCamelCase , output_hidden_states=_UpperCamelCase , )["hidden_states"][0] SCREAMING_SNAKE_CASE = model( _UpperCamelCase , attention_mask=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase , output_hidden_states=_UpperCamelCase , )["hidden_states"][0] # select random slice SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) ) def __snake_case( self : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any , *_UpperCamelCase : Tuple , ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE = BertGenerationDecoder(_UpperCamelCase ) model.to(_UpperCamelCase ) model.eval() SCREAMING_SNAKE_CASE = model(_UpperCamelCase , attention_mask=_UpperCamelCase , labels=_UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __snake_case( self : List[Any] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowercase ( a , a , a , unittest.TestCase ): lowercase__ : str = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase__ : List[str] = (BertGenerationDecoder,) if is_torch_available() else () lowercase__ : Optional[int] = ( {"""feature-extraction""": BertGenerationEncoder, 
"""text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def __snake_case( self : str ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = BertGenerationEncoderTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 ) def __snake_case( self : Dict ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def __snake_case( self : Dict ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCamelCase ) def __snake_case( self : Union[str, Any] ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE = "bert" self.model_tester.create_and_check_model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) def __snake_case( self : Optional[Any] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*_UpperCamelCase ) def __snake_case( self : List[Any] ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*_UpperCamelCase ) def __snake_case( self : List[str] ) -> Union[str, Any]: '''simple docstring''' ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() SCREAMING_SNAKE_CASE = None self.model_tester.create_and_check_model_as_decoder( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) def __snake_case( self : Optional[Any] ) -> List[str]: '''simple 
docstring''' SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*_UpperCamelCase ) @slow def __snake_case( self : Tuple ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(_UpperCamelCase ) @require_torch class lowercase ( unittest.TestCase ): @slow def __snake_case( self : Tuple ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) SCREAMING_SNAKE_CASE = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE = model(_UpperCamelCase )[0] SCREAMING_SNAKE_CASE = torch.Size([1, 8, 1_024] ) self.assertEqual(output.shape , _UpperCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor( [[[0.1_7_7_5, 0.0_0_8_3, -0.0_3_2_1], [1.6_0_0_2, 0.1_2_8_7, 0.3_9_1_2], [2.1_4_7_3, 0.5_7_9_1, 0.6_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) ) @require_torch class lowercase ( unittest.TestCase ): @slow def __snake_case( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) SCREAMING_SNAKE_CASE = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE = model(_UpperCamelCase )[0] SCREAMING_SNAKE_CASE = torch.Size([1, 8, 50_358] ) self.assertEqual(output.shape , _UpperCamelCase ) SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.5_7_8_8, -2.5_9_9_4, -3.7_0_5_4], [0.0_4_3_8, 4.7_9_9_7, 1.8_7_9_5], [1.5_8_6_2, 6.6_4_0_9, 4.4_6_3_8]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1e-4 ) )
206
import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class lowercase ( unittest.TestCase ): @slow def __snake_case( self : Optional[int] ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" ) SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("xlm-roberta-base" ) SCREAMING_SNAKE_CASE = "The dog is cute and lives in the garden house" SCREAMING_SNAKE_CASE = jnp.array([tokenizer.encode(_UpperCamelCase )] ) SCREAMING_SNAKE_CASE = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim SCREAMING_SNAKE_CASE = jnp.array( [[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] ) SCREAMING_SNAKE_CASE = model(_UpperCamelCase )["last_hidden_state"] self.assertEqual(output.shape , _UpperCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] , _UpperCamelCase , atol=1e-3 ) )
206
1
from __future__ import annotations def __UpperCamelCase ( lowerCAmelCase__ : list[float] , lowerCAmelCase__ : list[float] ): __a : Dict = sorted(numsa + numsa ) __a , __a : Optional[Any] = divmod(len(lowerCAmelCase__ ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() lowercase__ =[float(x) for x in input('Enter the elements of first array: ').split()] lowercase__ =[float(x) for x in input('Enter the elements of second array: ').split()] print(F"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
216
import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib lowercase__ ={ 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } lowercase__ =logging.WARNING def __UpperCamelCase ( ): __a : Optional[Any] = os.getenv('''DATASETS_VERBOSITY''' , lowerCAmelCase__ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"Unknown option DATASETS_VERBOSITY={env_level_str}, " f"has to be one of: { ', '.join(log_levels.keys() ) }" ) return _default_log_level def __UpperCamelCase ( ): return __name__.split('''.''' )[0] def __UpperCamelCase ( ): return logging.getLogger(_get_library_name() ) def __UpperCamelCase ( ): # Apply our default configuration to the library root logger. __a : str = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def __UpperCamelCase ( ): __a : Any = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def __UpperCamelCase ( lowerCAmelCase__ : Optional[str] = None ): if name is None: __a : Union[str, Any] = _get_library_name() return logging.getLogger(lowerCAmelCase__ ) def __UpperCamelCase ( ): return _get_library_root_logger().getEffectiveLevel() def __UpperCamelCase ( lowerCAmelCase__ : int ): _get_library_root_logger().setLevel(lowerCAmelCase__ ) def __UpperCamelCase ( ): return set_verbosity(lowerCAmelCase__ ) def __UpperCamelCase ( ): return set_verbosity(lowerCAmelCase__ ) def __UpperCamelCase ( ): return set_verbosity(lowerCAmelCase__ ) def __UpperCamelCase ( ): return set_verbosity(lowerCAmelCase__ ) def __UpperCamelCase ( ): __a : Union[str, Any] = False def __UpperCamelCase ( ): __a : Tuple = True # Configure the library root logger at the module level (singleton-like) 
_configure_library_root_logger() class UpperCamelCase__ : def __init__(self : str , *snake_case_ : str , **snake_case_ : Union[str, Any] ): # pylint: disable=unused-argument __a : Optional[Any] = args[0] if args else None def __iter__(self : List[str] ): return iter(self._iterator ) def __getattr__(self : str , snake_case_ : Optional[Any] ): def empty_fn(*snake_case_ : int , **snake_case_ : int ): # pylint: disable=unused-argument return return empty_fn def __enter__(self : Union[str, Any] ): return self def __exit__(self : str , snake_case_ : List[str] , snake_case_ : int , snake_case_ : Optional[Any] ): return lowercase__ =True class UpperCamelCase__ : def __call__(self : Tuple , *snake_case_ : str , snake_case_ : str=False , **snake_case_ : Dict ): if _tqdm_active and not disable: return tqdm_lib.tqdm(*snake_case_ , **snake_case_ ) else: return EmptyTqdm(*snake_case_ , **snake_case_ ) def lowerCAmelCase (self : Optional[Any] , *snake_case_ : Union[str, Any] , **snake_case_ : Optional[Any] ): __a : List[Any] = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*snake_case_ , **snake_case_ ) def lowerCAmelCase (self : str ): if _tqdm_active: return tqdm_lib.tqdm.get_lock() lowercase__ =_tqdm_cls() def __UpperCamelCase ( ): global _tqdm_active return bool(_tqdm_active ) def __UpperCamelCase ( ): global _tqdm_active __a : Dict = True def __UpperCamelCase ( ): global _tqdm_active __a : Union[str, Any] = False
216
1
"""simple docstring""" def _lowerCAmelCase ( lowercase_ ): if not isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = F"""Input value of [number={number}] must be an integer""" raise TypeError(lowercase_ ) if number < 1: UpperCAmelCase = F"""Input value of [number={number}] must be > 0""" raise ValueError(lowercase_ ) UpperCAmelCase = 1 for i in range(1 , lowercase_ ): current_number *= 4 * i - 2 current_number //= i + 1 return current_number if __name__ == "__main__": import doctest doctest.testmod()
181
"""simple docstring""" import json import os import unittest from typing import Tuple from transformers import WavaVecaPhonemeCTCTokenizer from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput from transformers.testing_utils import require_phonemizer from ...test_tokenization_common import TokenizerTesterMixin @require_phonemizer class A_ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" __UpperCamelCase = WavaVecaPhonemeCTCTokenizer __UpperCamelCase = False def UpperCAmelCase__ ( self :Optional[int] ) -> int: super().setUp() UpperCAmelCase = ( '<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː ' 'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː ' 'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 ' 'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ ' 'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ ' 'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ ' 'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ ' 'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ ' 'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ ' 'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. 
oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ ' 'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ ' 'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ ' 'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4' ).split(' ' ) UpperCAmelCase = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) UpperCAmelCase = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'} UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowercase_ ) + '\n' ) def UpperCAmelCase__ ( self :Dict , lowercase_ :Any , lowercase_ :Union[str, Any]=False , lowercase_ :int=20 , lowercase_ :Dict=5 ) -> Tuple[str, list]: UpperCAmelCase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase_ )) for i in range(len(lowercase_ ) )] UpperCAmelCase = list(filter(lambda lowercase_ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=lowercase_ ) , lowercase_ ) ) if max_length is not None and len(lowercase_ ) > max_length: UpperCAmelCase = toks[:max_length] if min_length is not None and len(lowercase_ ) < min_length and len(lowercase_ ) > 0: while len(lowercase_ ) < min_length: UpperCAmelCase = toks + toks # toks_str = [t[1] for t in toks] UpperCAmelCase = [t[0] for t in toks] # Ensure consistency UpperCAmelCase = tokenizer.decode(lowercase_ , clean_up_tokenization_spaces=lowercase_ ) if " " not in output_txt and len(lowercase_ ) > 1: UpperCAmelCase = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase_ ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase_ ) ) if with_prefix_space: UpperCAmelCase = ' ' + output_txt UpperCAmelCase = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ ) return output_txt, output_ids def UpperCAmelCase__ ( self :Union[str, Any] , **lowercase_ :Union[str, Any] ) -> 
Optional[Any]: kwargs.update(self.special_tokens_map ) return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **lowercase_ ) def UpperCAmelCase__ ( self :int ) -> str: UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) # check adding a single token tokenizer.add_tokens('xxx' ) UpperCAmelCase = tokenizer('m xxx ɪ' , do_phonemize=lowercase_ ).input_ids self.assertEqual(lowercase_ , [13, 3_92, 17] ) # xxx should be last token tokenizer.add_tokens(['aaa', 'bbb', 'ccc'] ) UpperCAmelCase = tokenizer('m aaa ɪ ccc' , do_phonemize=lowercase_ ).input_ids self.assertEqual(lowercase_ , [13, 3_93, 17, 3_95] ) # aaa and ccc should be after xxx and 2 after aaa UpperCAmelCase = tokenizer('maɪ c' , do_phonemize=lowercase_ ).input_ids self.assertEqual(lowercase_ , [3, 2_00] ) # mai should be <unk> (=3) def UpperCAmelCase__ ( self :Tuple ) -> Union[str, Any]: UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) UpperCAmelCase = 'Hello how are you' UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' ) self.assertEqual(lowercase_ , 'h ə l oʊ h aʊ ɑːɹ j uː' ) def UpperCAmelCase__ ( self :Dict ) -> int: UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) UpperCAmelCase = 'Hello how are you' UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' ) self.assertEqual(tokenizer(lowercase_ ).input_ids , tokenizer(lowercase_ , do_phonemize=lowercase_ ).input_ids ) def UpperCAmelCase__ ( self :Optional[Any] ) -> Dict: UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) UpperCAmelCase = 'Hello how are you' UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' ) UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids ) self.assertEqual(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :Optional[int] ) -> str: UpperCAmelCase = 
self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) UpperCAmelCase = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98], [24, 22, 5, 24, 22, 5, 77], ] UpperCAmelCase = tokenizer.decode(sample_ids[0] ) UpperCAmelCase = tokenizer.batch_decode(lowercase_ ) self.assertEqual(lowercase_ , batch_tokens[0] ) self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] ) def UpperCAmelCase__ ( self :Any ) -> str: UpperCAmelCase = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) tokenizer.add_tokens('|' ) UpperCAmelCase = 'Hello how are you' UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' ) self.assertEqual(lowercase_ , 'h ə l oʊ | h aʊ | ɑːɹ | j uː |' ) def UpperCAmelCase__ ( self :Any ) -> Any: UpperCAmelCase = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) tokenizer.add_tokens('|' ) UpperCAmelCase = 'Hello how are you' UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' ) self.assertEqual(tokenizer(lowercase_ ).input_ids , tokenizer(lowercase_ , do_phonemize=lowercase_ ).input_ids ) def UpperCAmelCase__ ( self :Dict ) -> Union[str, Any]: UpperCAmelCase = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) tokenizer.add_tokens('|' ) # fmt: off UpperCAmelCase = [ [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98], [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77], ] # fmt: on # decode with word_del_token filter UpperCAmelCase = tokenizer.decode(sample_ids[0] ) UpperCAmelCase = tokenizer.batch_decode(lowercase_ ) self.assertEqual(lowercase_ , batch_tokens[0] ) self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'] ) # decode with no word_del_token filter UpperCAmelCase = tokenizer.decode(sample_ids[0] , 
filter_word_delimiter_token=lowercase_ ) UpperCAmelCase = tokenizer.batch_decode(lowercase_ , filter_word_delimiter_token=lowercase_ ) self.assertEqual(lowercase_ , batch_tokens[0] ) self.assertEqual(lowercase_ , ['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'] ) def UpperCAmelCase__ ( self :int ) -> int: UpperCAmelCase = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) tokenizer.add_tokens('|' ) UpperCAmelCase = 'Hello how are you' UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' ) UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids , filter_word_delimiter_token=lowercase_ ) self.assertEqual(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :Union[str, Any] ) -> Optional[Any]: UpperCAmelCase = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token='|' ) tokenizer.add_tokens('|' ) UpperCAmelCase = 'Hello how are you' UpperCAmelCase = tokenizer.phonemize(lowercase_ , phonemizer_lang='en-us' ) UpperCAmelCase = tokenizer.decode(tokenizer(lowercase_ ).input_ids , filter_word_delimiter_token=lowercase_ ) self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |' )] ).strip() , lowercase_ ) def UpperCAmelCase__ ( self :int ) -> Optional[Any]: UpperCAmelCase = self.tokenizer_class.from_pretrained( 'facebook/wav2vec2-lv-60-espeak-cv-ft' , word_delimiter_token=lowercase_ ) UpperCAmelCase = 'Hello how are you' UpperCAmelCase = tokenizer(lowercase_ , phonemizer_lang='en-us' ).input_ids UpperCAmelCase = tokenizer(lowercase_ , phonemizer_lang='fr-fr' ).input_ids self.assertNotEqual(lowercase_ , lowercase_ ) UpperCAmelCase = tokenizer.decode(lowercase_ ) UpperCAmelCase = tokenizer.decode(lowercase_ ) self.assertEqual(lowercase_ , 'h ə l oʊ h aʊ ɑːɹ j uː' ) self.assertEqual(lowercase_ , 'ɛ l o h aʊ a ʁ j u' ) def UpperCAmelCase__ ( self :int ) -> List[Any]: UpperCAmelCase = 
self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) UpperCAmelCase = 'Hello how Are you' UpperCAmelCase = 'hello how are you' UpperCAmelCase = tokenizer(lowercase_ ).input_ids UpperCAmelCase = tokenizer(lowercase_ ).input_ids self.assertEqual(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :Optional[Any] ) -> int: UpperCAmelCase = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft' ) tokenizer.add_tokens(['!', '?'] ) tokenizer.add_special_tokens({'cls_token': '$$$'} ) # fmt: off UpperCAmelCase = [ [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94], [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94], ] # fmt: on UpperCAmelCase = tokenizer.batch_decode(lowercase_ ) self.assertEqual(lowercase_ , ['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'] ) @staticmethod def UpperCAmelCase__ ( lowercase_ :List[str] , lowercase_ :List[str] ) -> List[str]: UpperCAmelCase = [d[key] for d in offsets] return retrieved_list def UpperCAmelCase__ ( self :str ) -> Optional[int]: UpperCAmelCase = self.get_tokenizer(word_delimiter_token='|' ) tokenizer.add_tokens('|' ) # fmt: off # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ" UpperCAmelCase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98] # fmt: on UpperCAmelCase = tokenizer.decode(lowercase_ , output_char_offsets=lowercase_ , filter_word_delimiter_token=lowercase_ ) # check Wav2Vec2CTCTokenizerOutput keys for char self.assertEqual(len(outputs.keys() ) , 2 ) self.assertTrue('text' in outputs ) self.assertTrue('char_offsets' in outputs ) self.assertTrue(isinstance(lowercase_ , lowercase_ ) ) # check that order of chars is correct and identical for both outputs self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] , 'char' ) ) , outputs.text ) self.assertListEqual( 
self.get_from_offsets(outputs['char_offsets'] , 'char' ) , ['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'] ) # check that offsets are actually correct for char # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token, # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98 self.assertListEqual( self.get_from_offsets(outputs['char_offsets'] , 'start_offset' ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] ) self.assertListEqual( self.get_from_offsets(outputs['char_offsets'] , 'end_offset' ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] ) def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[int]: UpperCAmelCase = self.get_tokenizer(word_delimiter_token='|' ) def check_list_tuples_equal(lowercase_ :List[Any] , lowercase_ :str ): self.assertTrue(isinstance(lowercase_ , lowercase_ ) ) self.assertTrue(isinstance(outputs_list[0] , lowercase_ ) ) # transform list to ModelOutput UpperCAmelCase = WavaVecaPhonemeCTCTokenizerOutput( {k: [d[k] for d in outputs_list] for k in outputs_list[0]} ) self.assertListEqual(outputs_batch['text'] , outputs_batch_a['text'] ) def recursive_check(lowercase_ :Any , lowercase_ :str ): if isinstance(lowercase_ , lowercase_ ): [recursive_check(lowercase_ , lowercase_ ) for la, la in zip(lowercase_ , lowercase_ )] self.assertEqual(lowercase_ , lowercase_ ) if "char_offsets" in outputs_batch: recursive_check(outputs_batch['char_offsets'] , outputs_batch_a['char_offsets'] ) # fmt: off UpperCAmelCase = [ [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34], [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34], ] # fmt: on # We assume that `decode` works as expected. 
All we will check now is # the output type is correct and the output is identical to `decode` # char UpperCAmelCase = tokenizer.batch_decode(lowercase_ , output_char_offsets=lowercase_ ) UpperCAmelCase = [tokenizer.decode(lowercase_ , output_char_offsets=lowercase_ ) for ids in sample_ids] check_list_tuples_equal(lowercase_ , lowercase_ ) @unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes' ) def UpperCAmelCase__ ( self :Any ) -> str: pass @unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes' ) def UpperCAmelCase__ ( self :str ) -> List[str]: pass @unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency' ) def UpperCAmelCase__ ( self :List[str] ) -> int: pass @unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing' ) def UpperCAmelCase__ ( self :List[Any] ) -> Optional[int]: pass def UpperCAmelCase__ ( self :int ) -> Optional[Any]: UpperCAmelCase = self.get_tokenizers(do_lower_case=lowercase_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): UpperCAmelCase = tokenizer.vocab_size UpperCAmelCase = len(lowercase_ ) self.assertNotEqual(lowercase_ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) UpperCAmelCase = ['aaaaa bbbbbb', 'cccccccccdddddddd'] UpperCAmelCase = tokenizer.add_tokens(lowercase_ ) UpperCAmelCase = tokenizer.vocab_size UpperCAmelCase = len(lowercase_ ) self.assertNotEqual(lowercase_ , 0 ) self.assertEqual(lowercase_ , lowercase_ ) self.assertEqual(lowercase_ , len(lowercase_ ) ) self.assertEqual(lowercase_ , all_size + len(lowercase_ ) ) UpperCAmelCase = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=lowercase_ ) self.assertGreaterEqual(len(lowercase_ ) , 4 ) self.assertGreater(tokens[0] , 
tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) UpperCAmelCase = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'} UpperCAmelCase = tokenizer.add_special_tokens(lowercase_ ) UpperCAmelCase = tokenizer.vocab_size UpperCAmelCase = len(lowercase_ ) self.assertNotEqual(lowercase_ , 0 ) self.assertEqual(lowercase_ , lowercase_ ) self.assertEqual(lowercase_ , len(lowercase_ ) ) self.assertEqual(lowercase_ , all_size_a + len(lowercase_ ) ) UpperCAmelCase = tokenizer.encode( '>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=lowercase_ ) self.assertGreaterEqual(len(lowercase_ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' ) def UpperCAmelCase__ ( self :Tuple ) -> Optional[Any]: pass @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.' ) def UpperCAmelCase__ ( self :int ) -> Any: pass def UpperCAmelCase__ ( self :Tuple ) -> Dict: # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which # is not the case for Wav2Vec2PhonemeCTCTokenizer. UpperCAmelCase = self.get_tokenizers(fast=lowercase_ , do_lower_case=lowercase_ ) for tokenizer in tokenizers: with self.subTest(f"""{tokenizer.__class__.__name__}""" ): UpperCAmelCase = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't'] UpperCAmelCase = tokenizer.convert_tokens_to_string(lowercase_ ) self.assertIsInstance(output['text'] , lowercase_ )
181
1
import math def __UpperCamelCase ( _A ): if not isinstance(_A , _A ): lowerCAmelCase_ = f"Input value of [number={number}] must be an integer" raise TypeError(_A ) if number < 1: lowerCAmelCase_ = f"Input value of [number={number}] must be > 0" raise ValueError(_A ) elif number == 1: return 3 elif number == 2: return 5 else: lowerCAmelCase_ = int(math.log(number // 3 , 2 ) ) + 2 lowerCAmelCase_ = [3, 5] lowerCAmelCase_ = 2 lowerCAmelCase_ = 3 for block in range(1 , _A ): for _ in range(_A ): proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] ) proth_index += 1 increment *= 2 return proth_list[number - 1] if __name__ == "__main__": import doctest doctest.testmod() for number in range(11): _A = 0 try: _A = proth(number) except ValueError: print(f"ValueError: there is no {number}th Proth number") continue print(f"The {number}th Proth number: {value}")
278
"""Convert original YOLOS checkpoints to the Hugging Face format.

NOTE(review): this script has been through an identifier-obfuscation pass.
All function definitions are named ``__UpperCamelCase`` (later defs shadow
earlier ones), several signatures repeat the parameter name ``_A`` (a
SyntaxError in Python), and every assignment target was collapsed to
``lowerCAmelCase_`` — including what were originally config-attribute and
state-dict-key assignments, whose left-hand sides are now lost.  The bodies
still reference the ORIGINAL names (``yolos_name``, ``config``,
``state_dict``, ``model``, ...).  The script cannot run as written; restore
the names from the upstream conversion script.  Code below is byte-identical
— only comments were added.
"""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
_A = logging.get_logger(__name__)


# Build a YolosConfig for the named architecture variant
# (originally ``get_yolos_config(yolos_name)``).
def __UpperCamelCase ( _A ):
    lowerCAmelCase_ = YolosConfig()

    # size of the architecture
    # NOTE(review): the targets of these assignments (presumably
    # config.hidden_size, intermediate_size, num_hidden_layers, ... ) were
    # destroyed by the obfuscation — confirm against upstream.
    if "yolos_ti" in yolos_name:
        lowerCAmelCase_ = 192
        lowerCAmelCase_ = 768
        lowerCAmelCase_ = 12
        lowerCAmelCase_ = 3
        lowerCAmelCase_ = [800, 1333]
        lowerCAmelCase_ = False
    elif yolos_name == "yolos_s_dWr":
        lowerCAmelCase_ = 330
        lowerCAmelCase_ = 14
        lowerCAmelCase_ = 6
        lowerCAmelCase_ = 1320
    elif "yolos_s" in yolos_name:
        lowerCAmelCase_ = 384
        lowerCAmelCase_ = 1536
        lowerCAmelCase_ = 12
        lowerCAmelCase_ = 6
    elif "yolos_b" in yolos_name:
        lowerCAmelCase_ = [800, 1344]

    # COCO detection label mapping (91 classes) pulled from the Hub.
    lowerCAmelCase_ = 91
    lowerCAmelCase_ = '''huggingface/label-files'''
    lowerCAmelCase_ = '''coco-detection-id2label.json'''
    lowerCAmelCase_ = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
    lowerCAmelCase_ = {int(_A ): v for k, v in idalabel.items()}
    lowerCAmelCase_ = idalabel
    lowerCAmelCase_ = {v: k for k, v in idalabel.items()}

    return config


# Split the fused qkv projection of each layer into separate q/k/v entries
# (originally ``read_in_q_k_v(state_dict, config, base_model=False)``).
# NOTE(review): signature repeats ``_A`` three times — SyntaxError as written.
def __UpperCamelCase ( _A , _A , _A = False ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        lowerCAmelCase_ = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        # NOTE(review): the state-dict key targets were destroyed by obfuscation.
        lowerCAmelCase_ = in_proj_weight[: config.hidden_size, :]
        lowerCAmelCase_ = in_proj_bias[: config.hidden_size]
        lowerCAmelCase_ = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        lowerCAmelCase_ = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        lowerCAmelCase_ = in_proj_weight[-config.hidden_size :, :]
        lowerCAmelCase_ = in_proj_bias[-config.hidden_size :]


# Map an original checkpoint key to its Hugging Face counterpart
# (originally ``rename_key(name)``).
def __UpperCamelCase ( _A ):
    if "backbone" in name:
        lowerCAmelCase_ = name.replace('''backbone''' , '''vit''' )
    if "cls_token" in name:
        lowerCAmelCase_ = name.replace('''cls_token''' , '''embeddings.cls_token''' )
    if "det_token" in name:
        lowerCAmelCase_ = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
    if "mid_pos_embed" in name:
        lowerCAmelCase_ = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
    if "pos_embed" in name:
        lowerCAmelCase_ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
    if "patch_embed.proj" in name:
        lowerCAmelCase_ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "blocks" in name:
        lowerCAmelCase_ = name.replace('''blocks''' , '''encoder.layer''' )
    if "attn.proj" in name:
        lowerCAmelCase_ = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        lowerCAmelCase_ = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        lowerCAmelCase_ = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        lowerCAmelCase_ = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        lowerCAmelCase_ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        lowerCAmelCase_ = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "class_embed" in name:
        lowerCAmelCase_ = name.replace('''class_embed''' , '''class_labels_classifier''' )
    if "bbox_embed" in name:
        lowerCAmelCase_ = name.replace('''bbox_embed''' , '''bbox_predictor''' )
    if "vit.norm" in name:
        lowerCAmelCase_ = name.replace('''vit.norm''' , '''vit.layernorm''' )

    return name


# Rewrite the whole checkpoint dict, splitting qkv tensors per head size
# (originally ``convert_state_dict(orig_state_dict, model)``).
def __UpperCamelCase ( _A , _A ):
    for key in orig_state_dict.copy().keys():
        lowerCAmelCase_ = orig_state_dict.pop(_A )

        if "qkv" in key:
            lowerCAmelCase_ = key.split('''.''' )
            lowerCAmelCase_ = int(key_split[2] )
            lowerCAmelCase_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                lowerCAmelCase_ = val[:dim, :]
                lowerCAmelCase_ = val[
                    dim : dim * 2, :
                ]
                lowerCAmelCase_ = val[-dim:, :]
            else:
                lowerCAmelCase_ = val[:dim]
                lowerCAmelCase_ = val[dim : dim * 2]
                lowerCAmelCase_ = val[-dim:]
        else:
            lowerCAmelCase_ = val

    return orig_state_dict


# Download the standard COCO verification image (originally ``prepare_img``).
def __UpperCamelCase ( ):
    lowerCAmelCase_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCAmelCase_ = Image.open(requests.get(_A , stream=_A ).raw )
    return im


# Full conversion pipeline: build config, convert weights, verify outputs on
# a reference image, save, optionally push to the Hub
# (originally ``convert_yolos_checkpoint(yolos_name, checkpoint_path,
# pytorch_dump_folder_path, push_to_hub=False)``).
@torch.no_grad()
def __UpperCamelCase ( _A , _A , _A , _A = False ):
    lowerCAmelCase_ = get_yolos_config(_A )

    # load original state_dict
    lowerCAmelCase_ = torch.load(_A , map_location='''cpu''' )['''model''']

    # load 🤗 model
    lowerCAmelCase_ = YolosForObjectDetection(_A )
    model.eval()
    lowerCAmelCase_ = convert_state_dict(_A , _A )
    model.load_state_dict(_A )

    # Check outputs on an image, prepared by YolosImageProcessor
    lowerCAmelCase_ = 800 if yolos_name != '''yolos_ti''' else 512
    lowerCAmelCase_ = YolosImageProcessor(format='''coco_detection''' , size=_A )
    lowerCAmelCase_ = image_processor(images=prepare_img() , return_tensors='''pt''' )
    lowerCAmelCase_ = model(**_A )
    lowerCAmelCase_ , lowerCAmelCase_ = outputs.logits, outputs.pred_boxes

    # Per-variant reference slices used to verify numerical equivalence.
    lowerCAmelCase_ , lowerCAmelCase_ = None, None
    if yolos_name == "yolos_ti":
        lowerCAmelCase_ = torch.tensor(
            [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]] )
        lowerCAmelCase_ = torch.tensor(
            [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]] )
    elif yolos_name == "yolos_s_200_pre":
        lowerCAmelCase_ = torch.tensor(
            [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]] )
        lowerCAmelCase_ = torch.tensor(
            [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] )
    elif yolos_name == "yolos_s_300_pre":
        lowerCAmelCase_ = torch.tensor(
            [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]] )
        lowerCAmelCase_ = torch.tensor(
            [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]] )
    elif yolos_name == "yolos_s_dWr":
        lowerCAmelCase_ = torch.tensor(
            [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]] )
        lowerCAmelCase_ = torch.tensor(
            [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]] )
    elif yolos_name == "yolos_base":
        lowerCAmelCase_ = torch.tensor(
            [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]] )
        lowerCAmelCase_ = torch.tensor(
            [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]] )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}" )

    assert torch.allclose(logits[0, :3, :3] , _A , atol=1E-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , _A , atol=1E-4 )

    Path(_A ).mkdir(exist_ok=_A )
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(_A )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(_A )

    if push_to_hub:
        lowerCAmelCase_ = {
            '''yolos_ti''': '''yolos-tiny''',
            '''yolos_s_200_pre''': '''yolos-small''',
            '''yolos_s_300_pre''': '''yolos-small-300''',
            '''yolos_s_dWr''': '''yolos-small-dwr''',
            '''yolos_base''': '''yolos-base''',
        }

        print('''Pushing to the hub...''' )
        lowerCAmelCase_ = model_mapping[yolos_name]
        image_processor.push_to_hub(_A , organization='''hustvl''' )
        model.push_to_hub(_A , organization='''hustvl''' )


if __name__ == "__main__":
    lowerCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--yolos_name''',
        default='''yolos_s_200_pre''',
        type=str,
        help=(
            '''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
            ''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )

    lowerCAmelCase_ = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
278
1
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __lowerCAmelCase : Tuple =logging.get_logger(__name__) __lowerCAmelCase : Optional[int] ={ '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''', } class _A ( _a ): snake_case__ : str = """instructblip_vision_model""" def __init__( self , __lowerCAmelCase=1408 , __lowerCAmelCase=6144 , __lowerCAmelCase=39 , __lowerCAmelCase=16 , __lowerCAmelCase=224 , __lowerCAmelCase=14 , __lowerCAmelCase="gelu" , __lowerCAmelCase=1E-6 , __lowerCAmelCase=0.0 , __lowerCAmelCase=1E-10 , __lowerCAmelCase=True , **__lowerCAmelCase , ): """simple docstring""" super().__init__(**__lowerCamelCase ) lowercase = hidden_size lowercase = intermediate_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = patch_size lowercase = image_size lowercase = initializer_range lowercase = attention_dropout lowercase = layer_norm_eps lowercase = hidden_act lowercase = qkv_bias @classmethod def A__ ( cls , __lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" cls._set_token_in_kwargs(__lowerCamelCase ) lowercase = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": lowercase = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(__lowerCamelCase , **__lowerCamelCase ) class _A ( _a ): snake_case__ : str = """instructblip_qformer""" def __init__( self , __lowerCAmelCase=3_0522 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=0 , __lowerCAmelCase="absolute" , __lowerCAmelCase=2 , __lowerCAmelCase=1408 , **__lowerCAmelCase , ): """simple docstring""" super().__init__(pad_token_id=__lowerCamelCase , **__lowerCamelCase ) lowercase = vocab_size lowercase = hidden_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = hidden_act lowercase = intermediate_size lowercase = hidden_dropout_prob lowercase = attention_probs_dropout_prob lowercase = max_position_embeddings lowercase = initializer_range lowercase = layer_norm_eps lowercase = position_embedding_type lowercase = cross_attention_frequency lowercase = encoder_hidden_size @classmethod def A__ ( cls , __lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" cls._set_token_in_kwargs(__lowerCamelCase ) lowercase = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get("""model_type""" ) == "instructblip": lowercase = config_dict["""qformer_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(__lowerCamelCase , **__lowerCamelCase ) class _A ( _a ): snake_case__ : Tuple = """instructblip""" snake_case__ : Optional[Any] = True def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=32 , **__lowerCAmelCase ): """simple docstring""" super().__init__(**__lowerCamelCase ) if vision_config is None: lowercase = {} logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" ) if qformer_config is None: lowercase = {} logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" ) if text_config is None: lowercase = {} logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" ) lowercase = InstructBlipVisionConfig(**__lowerCamelCase ) lowercase = InstructBlipQFormerConfig(**__lowerCamelCase ) lowercase = text_config["""model_type"""] if """model_type""" in text_config else """opt""" lowercase = CONFIG_MAPPING[text_model_type](**__lowerCamelCase ) lowercase = self.text_config.tie_word_embeddings lowercase = self.text_config.is_encoder_decoder lowercase = num_query_tokens lowercase = self.vision_config.hidden_size lowercase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES lowercase = 1.0 lowercase = 0.0_2 @classmethod def A__ ( cls , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase , ): """simple docstring""" return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__lowerCamelCase , ) def A__ ( self ): """simple docstring""" lowercase = copy.deepcopy(self.__dict__ ) lowercase = self.vision_config.to_dict() lowercase = self.qformer_config.to_dict() lowercase = self.text_config.to_dict() lowercase = self.__class__.model_type return output
367
"""simple docstring""" from __future__ import annotations import matplotlib.pyplot as plt # type: ignore import numpy # initial triangle of Koch snowflake __lowerCAmelCase : List[Any] =numpy.array([0, 0]) __lowerCAmelCase : List[str] =numpy.array([0.5, 0.866_0254]) __lowerCAmelCase : List[Any] =numpy.array([1, 0]) __lowerCAmelCase : int =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] , lowerCAmelCase__ :int ) -> list[numpy.ndarray]: '''simple docstring''' lowercase = initial_vectors for _ in range(lowerCAmelCase__ ): lowercase = iteration_step(lowerCAmelCase__ ) return vectors def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> list[numpy.ndarray]: '''simple docstring''' lowercase = [] for i, start_vector in enumerate(vectors[:-1] ): lowercase = vectors[i + 1] new_vectors.append(lowerCAmelCase__ ) lowercase = end_vector - start_vector new_vectors.append(start_vector + difference_vector / 3 ) new_vectors.append( start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) ) new_vectors.append(start_vector + difference_vector * 2 / 3 ) new_vectors.append(vectors[-1] ) return new_vectors def UpperCAmelCase__ ( lowerCAmelCase__ :numpy.ndarray , lowerCAmelCase__ :float ) -> numpy.ndarray: '''simple docstring''' lowercase = numpy.radians(lowerCAmelCase__ ) lowercase , lowercase = numpy.cos(lowerCAmelCase__ ), numpy.sin(lowerCAmelCase__ ) lowercase = numpy.array(((c, -s), (s, c)) ) return numpy.dot(lowerCAmelCase__ , lowerCAmelCase__ ) def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> None: '''simple docstring''' lowercase = plt.gca() axes.set_aspect("""equal""" ) # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all # y-coordinates as inputs, which are constructed from the vector-list using # zip() lowercase , lowercase = zip(*lowerCAmelCase__ ) plt.plot(lowerCAmelCase__ , lowerCAmelCase__ ) plt.show() if __name__ == "__main__": import doctest 
doctest.testmod() __lowerCAmelCase : Optional[int] =iterate(INITIAL_VECTORS, 5) plot(processed_vectors)
32
0
"""RoBERTa model configuration, plus its ONNX export configuration.

NOTE(review): class/attribute identifiers look machine-mangled
(`lowerCamelCase__`, `SCREAMING_SNAKE_CASE__`); they are kept so any external
reference keeps resolving, but upstream they are `RobertaConfig`,
`RobertaOnnxConfig` and `model_type` — confirm before renaming.  The second
class definition shadows the first at module scope (as in the original).
"""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
UpperCAmelCase__ = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class lowerCamelCase__(PretrainedConfig):
    """Configuration holding the hyper-parameters of a RoBERTa model.

    The obfuscated original inherited an undefined name and repeated one
    parameter name for every ``__init__`` argument (a SyntaxError); the
    parameter names below follow the attribute assignments in the body.
    """

    # Presumably the `model_type` key used by the auto classes — TODO confirm.
    SCREAMING_SNAKE_CASE__ = "roberta"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class lowerCamelCase__(OnnxConfig):  # noqa: F811 — shadows the config class above, as in the original
    """ONNX export configuration for RoBERTa-style encoders."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Property restored to ``inputs`` (the name the OnnxConfig contract
        # reads); the mangled ``__A`` was inaccessible via name mangling.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
5
"""Smoke tests for the Tatoeba -> Marian conversion utilities."""
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


# The obfuscated skip condition referenced an undefined name; the only path
# constant imported above is DEFAULT_REPO, matching the skip message.
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class __magic_name__(unittest.TestCase):
    @cached_property
    def resolver(self):
        """A converter writing into a throw-away temporary directory.

        Restored name: the test bodies read ``self.resolver``, but all three
        methods in the obfuscated original shared one placeholder name.
        """
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        # Converting a single language pair should complete without raising.
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        # A dry run must still resolve and report the long language pair.
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
146
0
"""simple docstring""" from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time __SCREAMING_SNAKE_CASE =Lock() def lowercase__( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str ): global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(__SCREAMING_SNAKE_CASE ) process_lock.release() # receive your right neighbor's value process_lock.acquire() lowercase_ : Union[str, Any] = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left lowercase_ : Dict = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(__SCREAMING_SNAKE_CASE ) process_lock.release() # receive your left neighbor's value process_lock.acquire() lowercase_ : Optional[Any] = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right lowercase_ : Optional[Any] = max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # after all swaps are performed, send the values back to main result_pipe[1].send(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ): lowercase_ : Optional[int] = [] lowercase_ : List[Any] = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are 
made outside # of the loop lowercase_ : Union[str, Any] = Pipe() lowercase_ : int = Pipe() process_array_.append( Process( target=__SCREAMING_SNAKE_CASE , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) lowercase_ : Any = temp_rs lowercase_ : Union[str, Any] = temp_rr for i in range(1 , len(__SCREAMING_SNAKE_CASE ) - 1 ): lowercase_ : Dict = Pipe() lowercase_ : Tuple = Pipe() process_array_.append( Process( target=__SCREAMING_SNAKE_CASE , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) lowercase_ : str = temp_rs lowercase_ : int = temp_rr process_array_.append( Process( target=__SCREAMING_SNAKE_CASE , args=( len(__SCREAMING_SNAKE_CASE ) - 1, arr[len(__SCREAMING_SNAKE_CASE ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(__SCREAMING_SNAKE_CASE ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(__SCREAMING_SNAKE_CASE ) ): lowercase_ : Any = result_pipe[p][0].recv() process_array_[p].join() return arr def lowercase__( ): lowercase_ : Optional[int] = list(range(10 , 0 , -1 ) ) print('Initial List' ) print(*__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = odd_even_transposition(__SCREAMING_SNAKE_CASE ) print('Sorted List\n' ) print(*__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
360
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 
'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase )
321
0
"""Download an Instagram video/IGTV post via the downloadgram.net API."""
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Return the raw bytes of the video behind an Instagram/IGTV ``url``.

    The downloadgram endpoint resolves the post URL to a direct media URL,
    which is then fetched.  (Function restored to the name the ``__main__``
    block calls — the obfuscated def used a placeholder name.)
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
273
"""SageMaker single-node GPU regression test for the example training scripts.

Method names are reconstructed from the surviving call sites (the obfuscated
original named every method ``_lowercase``, so ``self.create_estimator()``
could not resolve).
"""
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized_class

from . import is_sagemaker_available

if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class A_(unittest.TestCase):
    def setUp(self):
        # PyTorch runs reuse the text-classification example script; copy it
        # into the job's upload directory first.
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,  # NOTE(review): was an undefined mangled name — fail loudly if the copy fails
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        """Build a HuggingFace estimator for the current parameterized framework."""
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,  # NOTE(review): reconstructed value — confirm against upstream
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics to a CSV next to the test assets."""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()
        # Run training.
        estimator.fit()
        # Result dataframe of the training job metrics.
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # Extract KPIs.
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # Train time from SageMaker includes starting, preprocessing, stopping.
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # Assert KPIs against the parameterized expectations.
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # Dump test results into a JSON file to share in the PR.
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
273
1
"""Convert an original XLM checkpoint into the transformers layout."""
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging

logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Rewrite an XLM dump into weights/config/vocab files under the dump folder.

    Restored from the obfuscated version, whose loop assigned a scalar
    placeholder twice instead of populating the renamed state dict, and whose
    function name did not match the ``__main__`` call site.
    """
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository:
    # every non-head weight gets a "transformer." prefix.
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Drop tensor-valued entries; only JSON-serializable params survive.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # BPE bookkeeping: plain words get the end-of-word marker, continuation
    # pieces lose their "@@" suffix; the first 14 ids are special tokens.
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    # Fixed: the original message printed the config path for the vocab file.
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
204
"""Project Euler 187: count semiprimes (exactly two prime factors) below a bound."""
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below ``max_number`` (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    # Sieve only up to sqrt(max_number - 1); larger factors are already marked.
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count composites below ``max_number`` with exactly two prime factors.

    Uses a two-pointer sweep over primes p <= q: the smaller factor of a
    semiprime below N is always below N/2, so sieving to N/2 suffices.
    (Restored the helper's name — the obfuscated file gave both functions
    one placeholder name and then called the undefined real name.)
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        # Shrink the window until p[left] * p[right] fits below the bound;
        # then every q in [left, right] pairs validly with p[left].
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
204
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available SCREAMING_SNAKE_CASE__ = { "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"], "tokenization_ctrl": ["CTRLTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "CTRLForSequenceClassification", "CTRLLMHeadModel", "CTRLModel", "CTRLPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", "TFCTRLPreTrainedModel", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
46
"""Accelerate example: fine-tune BERT on GLUE/MRPC while tracking peak GPU memory.

NOTE(review): the obfuscated original collapsed every local to one identifier
(a SyntaxError in the ``get_dataloaders`` signature) and referenced names
(``bamb``, ``TorchTracemalloc``, ``tokenizer``, ``datasets``) that were never
defined under those spellings.  Names below are reconstructed from the
surviving call sites; confirm details against the upstream accelerate example.
"""
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def bamb(x):
    """Convert a byte count to whole mebibytes."""
    return int(x / 2**20)


class TorchTracemalloc:
    """Context manager reporting CUDA memory allocated inside the ``with`` block."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)


def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased", n_train=320, n_val=160):
    """Tokenize slices of MRPC and return (train, eval) dataloaders."""
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the tokenizer to all splits of the dataset.
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["idx", "sentence1", "sentence2"],
        load_from_cache_file=False,
    )
    # Rename 'label' to 'labels', the argument name the models expect.
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,  # NOTE(review): reconstructed — original value was mangled
    )
    return train_dataloader, eval_dataloader


def training_function(config, args):
    """Train for ``config['num_epochs']`` epochs, asserting on peak GPU memory."""
    accelerator = Accelerator()

    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(
        accelerator, batch_size, model_name_or_path, args.n_train, args.n_val
    )
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # When DeepSpeed owns the optimizer, hand Accelerate a dummy placeholder.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Same dummy-object trick for the LR scheduler.
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # No specific order to remember — unpack in the same order as given.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    overall_step = 0
    starting_epoch = 0

    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Print GPU memory usage: allocated at start, consumed, and peaks.
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    """Parse CLI arguments and launch the training run."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
180
0
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING _snake_case = logging.get_logger(__name__) @add_end_docstrings(snake_case_ ) class UpperCamelCase ( snake_case_ ): def __init__( self : str , **UpperCAmelCase__ : List[Any] ) -> str: super().__init__(**UpperCAmelCase__ ) if self.framework == "tf": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" ) requires_backends(self , """vision""" ) self.check_model_type(UpperCAmelCase__ ) def __call__( self : Any , UpperCAmelCase__ : Union[str, "Image.Image", List[Dict[str, Any]]] , UpperCAmelCase__ : Union[str, List[str]] = None , **UpperCAmelCase__ : int , ) -> str: if "text_queries" in kwargs: _a : Any = kwargs.pop("""text_queries""" ) if isinstance(UpperCAmelCase__ , (str, Image.Image) ): _a : str = {"""image""": image, """candidate_labels""": candidate_labels} else: _a : Union[str, Any] = image _a : Tuple = super().__call__(UpperCAmelCase__ , **UpperCAmelCase__ ) return results def _lowercase ( self : int , **UpperCAmelCase__ : Any ) -> int: _a : Tuple = {} if "threshold" in kwargs: _a : Optional[int] = kwargs["""threshold"""] if "top_k" in kwargs: _a : List[str] = kwargs["""top_k"""] return {}, {}, postprocess_params def _lowercase ( self : Any , UpperCAmelCase__ : Optional[Any] ) -> str: _a : Any = load_image(inputs["""image"""] ) _a : str = inputs["""candidate_labels"""] if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _a : Optional[int] = candidate_labels.split(""",""" ) _a : Dict = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, 
candidate_label in enumerate(UpperCAmelCase__ ): _a : int = self.tokenizer(UpperCAmelCase__ , return_tensors=self.framework ) _a : Optional[Any] = self.image_processor(UpperCAmelCase__ , return_tensors=self.framework ) yield { "is_last": i == len(UpperCAmelCase__ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def _lowercase ( self : Any , UpperCAmelCase__ : int ) -> Union[str, Any]: _a : Union[str, Any] = model_inputs.pop("""target_size""" ) _a : Any = model_inputs.pop("""candidate_label""" ) _a : List[Any] = model_inputs.pop("""is_last""" ) _a : Optional[Any] = self.model(**UpperCAmelCase__ ) _a : List[str] = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs} return model_outputs def _lowercase ( self : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=None ) -> Dict: _a : List[Any] = [] for model_output in model_outputs: _a : Dict = model_output["""candidate_label"""] _a : List[Any] = BaseModelOutput(UpperCAmelCase__ ) _a : int = self.image_processor.post_process_object_detection( outputs=UpperCAmelCase__ , threshold=UpperCAmelCase__ , target_sizes=model_output["""target_size"""] )[0] for index in outputs["scores"].nonzero(): _a : Optional[int] = outputs["""scores"""][index].item() _a : Tuple = self._get_bounding_box(outputs["""boxes"""][index][0] ) _a : List[str] = {"""score""": score, """label""": label, """box""": box} results.append(UpperCAmelCase__ ) _a : Optional[Any] = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : x["score"] , reverse=UpperCAmelCase__ ) if top_k: _a : Optional[Any] = results[:top_k] return results def _lowercase ( self : List[str] , UpperCAmelCase__ : "torch.Tensor" ) -> Dict[str, int]: if self.framework != "pt": raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" ) _a , _a , _a , _a : List[str] = box.int().tolist() _a : 
Any = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
324
"""simple docstring""" import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py _snake_case = 'src/transformers' # This is to make sure the transformers module imported is the one in the repo. _snake_case = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. _snake_case = re.compile(r'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') _snake_case = re.compile(r'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. _snake_case = re.compile(r'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)') # Fill this with tuples (pipeline_tag, model_mapping, auto_model) _snake_case = [ ('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'), ('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'), ('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'), ('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'), ('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'), ('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'), ('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'), ('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'), ('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'), ( 'zero-shot-object-detection', 'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForZeroShotObjectDetection', ), 
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'), ('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'), ('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'), ('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'), ( 'table-question-answering', 'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForTableQuestionAnswering', ), ('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'), ('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'), ( 'next-sentence-prediction', 'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES', 'AutoModelForNextSentencePrediction', ), ( 'audio-frame-classification', 'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioFrameClassification', ), ('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'), ( 'document-question-answering', 'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForDocumentQuestionAnswering', ), ( 'visual-question-answering', 'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForVisualQuestionAnswering', ), ('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'), ( 'zero-shot-image-classification', 'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForZeroShotImageClassification', ), ('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'), ('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'), ('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'), ] def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a : Dict = 
re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , UpperCamelCase__ ) return [m.group(0 ) for m in matches] def lowerCAmelCase__ ( ): '''simple docstring''' _a : Tuple = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES _a : Optional[int] = { config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. _a : List[Any] = collections.defaultdict(UpperCamelCase__ ) _a : List[str] = collections.defaultdict(UpperCamelCase__ ) _a : Tuple = collections.defaultdict(UpperCamelCase__ ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(UpperCamelCase__ ): _a : str = None if _re_tf_models.match(UpperCamelCase__ ) is not None: _a : List[Any] = tf_models _a : int = _re_tf_models.match(UpperCamelCase__ ).groups()[0] elif _re_flax_models.match(UpperCamelCase__ ) is not None: _a : Any = flax_models _a : Any = _re_flax_models.match(UpperCamelCase__ ).groups()[0] elif _re_pt_models.match(UpperCamelCase__ ) is not None: _a : int = pt_models _a : int = _re_pt_models.match(UpperCamelCase__ ).groups()[0] if lookup_dict is not None: while len(UpperCamelCase__ ) > 0: if attr_name in model_prefix_to_model_type: _a : Optional[int] = True break # Try again after removing the last word in the name _a : List[Any] = """""".join(camel_case_split(UpperCamelCase__ )[:-1] ) _a : Optional[int] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) _a : Dict = list(UpperCamelCase__ ) all_models.sort() _a : str = {"""model_type""": all_models} _a : List[Any] = [pt_models[t] for t in all_models] _a : str = [tf_models[t] for t in all_models] _a : Optional[int] = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure _a : str = {} for t in all_models: if t in 
transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: _a : List[str] = """AutoProcessor""" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: _a : str = """AutoTokenizer""" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: _a : int = """AutoFeatureExtractor""" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. _a : int = """AutoTokenizer""" _a : Any = [processors[t] for t in all_models] return pd.DataFrame(UpperCamelCase__ ) def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a : List[Any] = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: _a : List[Any] = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""] _a : Union[str, Any] = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): # The type of pipeline may not exist in this framework if not hasattr(UpperCamelCase__ , UpperCamelCase__ ): continue # First extract all model_names _a : str = [] for name in getattr(UpperCamelCase__ , UpperCamelCase__ ).values(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): model_names.append(UpperCamelCase__ ) else: model_names.extend(list(UpperCamelCase__ ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Dict = get_frameworks_table() _a : Optional[Any] = Dataset.from_pandas(UpperCamelCase__ ) _a : Any = hf_hub_download( """huggingface/transformers-metadata""" , """pipeline_tags.json""" , 
repo_type="""dataset""" , token=UpperCamelCase__ ) _a : List[Any] = Dataset.from_json(UpperCamelCase__ ) _a : List[str] = { tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""]) for i in range(len(UpperCamelCase__ ) ) } _a : str = update_pipeline_and_auto_class_table(UpperCamelCase__ ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. _a : int = sorted(table.keys() ) _a : Union[str, Any] = pd.DataFrame( { """model_class""": model_classes, """pipeline_tag""": [table[m][0] for m in model_classes], """auto_class""": [table[m][1] for m in model_classes], } ) _a : Dict = Dataset.from_pandas(UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(UpperCamelCase__ , """frameworks.json""" ) ) tags_dataset.to_json(os.path.join(UpperCamelCase__ , """pipeline_tags.json""" ) ) if commit_sha is not None: _a : List[str] = ( F"""Update with commit {commit_sha}\n\nSee: """ F"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: _a : Optional[Any] = """Update""" upload_folder( repo_id="""huggingface/transformers-metadata""" , folder_path=UpperCamelCase__ , repo_type="""dataset""" , token=UpperCamelCase__ , commit_message=UpperCamelCase__ , ) def lowerCAmelCase__ ( ): '''simple docstring''' _a : List[str] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} _a : Any = transformers_module.pipelines.SUPPORTED_TASKS _a : List[str] = [] for key in pipeline_tasks: if key not in in_table: _a : Tuple = pipeline_tasks[key]["""pt"""] if isinstance(UpperCamelCase__ , (list, tuple) ): _a : Dict = model[0] _a : List[str] = model.__name__ if model not in in_table.values(): missing.append(UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: _a : Union[str, Any] = """, """.join(UpperCamelCase__ ) raise ValueError( """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """ 
F"""`utils/update_metadata.py`: {msg}. Please add them!""" ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.') parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.') parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.') _snake_case = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
324
1
__lowerCamelCase : dict[tuple[int, int, int], int] = {} def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ): """simple docstring""" if late == 3 or absent == 2: return 0 # if we have no days left, and have not failed any other rules, # we have a prize string if days == 0: return 1 # No easy solution, so now we need to do the recursive calculation # First, check if the combination is already in the cache, and # if yes, return the stored value from there since we already # know the number of possible prize strings from this point on SCREAMING_SNAKE_CASE_ : Optional[int] = (days, absent, late) if key in cache: return cache[key] # now we calculate the three possible ways that can unfold from # this point on, depending on our attendance today # 1) if we are late (but not absent), the "absent" counter stays as # it is, but the "late" counter increases by one SCREAMING_SNAKE_CASE_ : Tuple = _calculate(days - 1 , lowerCAmelCase , late + 1 ) # 2) if we are absent, the "absent" counter increases by 1, and the # "late" counter resets to 0 SCREAMING_SNAKE_CASE_ : Optional[Any] = _calculate(days - 1 , absent + 1 , 0 ) # 3) if we are on time, this resets the "late" counter and keeps the # absent counter SCREAMING_SNAKE_CASE_ : str = _calculate(days - 1 , lowerCAmelCase , 0 ) SCREAMING_SNAKE_CASE_ : str = state_late + state_absent + state_ontime SCREAMING_SNAKE_CASE_ : Dict = prizestrings return prizestrings def _snake_case ( lowerCAmelCase : int = 3_0 ): """simple docstring""" return _calculate(lowerCAmelCase , absent=0 , late=0 ) if __name__ == "__main__": print(solution())
18
'''simple docstring''' from PIL import Image def __lowerCamelCase ( A__ , A__ ) -> Image: """simple docstring""" def brightness(A__ ) -> float: return 128 + level + (c - 128) if not -255.0 <= level <= 255.0: raise ValueError('level must be between -255.0 (black) and 255.0 (white)' ) return img.point(A__ ) if __name__ == "__main__": # Load image with Image.open("image_data/lena.jpg") as img: # Change brightness to 100 _lowerCamelCase : List[str] = change_brightness(img, 100) brigt_img.save("image_data/lena_brightness.png", format="png")
28
0
"""Aggregate nightly pytest JSON logs and post a failure report to Slack."""
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate


# Minimal table format so the output renders nicely inside a Slack code block.
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {
    "type": "section",
    "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True},
}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

# Parse every pytest report-log file in the working directory.
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files_failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table_rows = []
            files_failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                # Keep only the file name, not its full path.
                data[0] = data[0].split("/")[-1]
                if data[0] not in files_failed:
                    files_failed[data[0]] = [data[1:]]
                else:
                    files_failed[data[0]] += [data[1:]]
                failed_table_rows.append(data)

            files = [row[0] for row in failed_table_rows]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files_failed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files_failed.append(files_failed)

    if len(message) > 3_000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3_000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files_failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of each test class name in the table.
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            thread_payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[thread_payload],
            )
361
def lowerCamelCase ( a_ ) -> "list[int]": if upper_limit < 0: raise ValueError('Limit for the Catalan sequence must be ≥ 0' ) lowerCAmelCase_ = [0] * (upper_limit + 1) # Base case: C(0) = C(1) = 1 lowerCAmelCase_ = 1 if upper_limit > 0: lowerCAmelCase_ = 1 # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i for i in range(2 , upper_limit + 1 ): for j in range(a_ ): catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1] return catalan_list if __name__ == "__main__": print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""") print("""\n*** Enter -1 at any time to quit ***""") print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""") try: while True: lowerCamelCase_ = int(input().strip()) if N < 0: print("""\n********* Goodbye!! ************""") break else: print(f'''The Catalan numbers from 0 through {N} are:''') print(catalan_numbers(N)) print("""Try another upper limit for the sequence: """, end="""""") except (NameError, ValueError): print("""\n********* Invalid input, goodbye! ************\n""") import doctest doctest.testmod()
14
0
"""simple docstring""" from __future__ import annotations def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ): if days_between_payments <= 0: raise ValueError("days_between_payments must be > 0" ) if daily_interest_rate < 0: raise ValueError("daily_interest_rate must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return principal * daily_interest_rate * days_between_payments def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, ): if number_of_compounding_periods <= 0: raise ValueError("number_of_compounding_periods must be > 0" ) if nominal_annual_interest_rate_percentage < 0: raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, ): if number_of_years <= 0: raise ValueError("number_of_years must be > 0" ) if nominal_annual_percentage_rate < 0: raise ValueError("nominal_annual_percentage_rate must be >= 0" ) if principal <= 0: raise ValueError("principal must be > 0" ) return compound_interest( __lowerCamelCase, nominal_annual_percentage_rate / 365, number_of_years * 365 ) if __name__ == "__main__": import doctest doctest.testmod()
61
"""Check that every config class docstring mentions a valid checkpoint.

All paths are set with the intent you should run this script from the root of the repo with:
    python utils/check_config_docstrings.py
"""
import inspect
import re

from transformers.utils import direct_transformers_import


PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name in `config_class`'s source whose link matches its name, or None."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise if a (non-ignored) config class docstring contains no valid checkpoint reference."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
154
0
'''simple docstring''' import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput lowerCAmelCase : List[Any] ='''scheduler_config.json''' class a_ ( _lowerCAmelCase ): __A = 1 __A = 2 __A = 3 __A = 4 __A = 5 __A = 6 __A = 7 __A = 8 __A = 9 __A = 10 __A = 11 __A = 12 __A = 13 __A = 14 @dataclass class a_ ( _lowerCAmelCase ): __A = 42 class a_ : __A = SCHEDULER_CONFIG_NAME __A = [] __A = True @classmethod def lowercase__ ( cls : Any , lowercase : Dict[str, Any] = None , lowercase : Optional[str] = None , lowercase : Optional[int]=False , **lowercase : Union[str, Any] , ): """simple docstring""" lowercase_ , lowercase_ , lowercase_ :List[str] = cls.load_config( pretrained_model_name_or_path=lowercase , subfolder=lowercase , return_unused_kwargs=lowercase , return_commit_hash=lowercase , **lowercase , ) return cls.from_config(lowercase , return_unused_kwargs=lowercase , **lowercase ) def lowercase__ ( self : Optional[int] , lowercase : Union[str, os.PathLike] , lowercase : bool = False , **lowercase : List[str] ): """simple docstring""" self.save_config(save_directory=lowercase , push_to_hub=lowercase , **lowercase ) @property def lowercase__ ( self : int ): """simple docstring""" return self._get_compatibles() @classmethod def lowercase__ ( cls : Union[str, Any] ): """simple docstring""" lowercase_ :Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) ) lowercase_ :int = importlib.import_module(__name__.split("." )[0] ) lowercase_ :List[str] = [ getattr(lowercase , lowercase ) for c in compatible_classes_str if hasattr(lowercase , lowercase ) ] return compatible_classes
147
"""Lazy import structure for the CLIP model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Always-importable modules; optional-backend modules are appended below.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
147
1
"""Convert a ParlAI Blenderbot checkpoint to the Hugging Face Transformers format."""
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# (parlai substring, transformers substring) replacement pairs, applied in order.
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    """Translate a ParlAI state-dict key into the matching Transformers key."""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    """Rename `layernorm_embedding` keys to `layer_norm` in-place (Blenderbot-3B checkpoints)."""
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Load a ParlAI checkpoint, remap its keys, and save a Transformers Blenderbot model."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:
        # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
201
def lowerCAmelCase_ ( __UpperCAmelCase: float , __UpperCAmelCase: int ) -> float: if digit_amount > 0: return round(number - int(__UpperCAmelCase ) , __UpperCAmelCase ) return number - int(__UpperCAmelCase ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
201
1
"""Open-Llama model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    """Configuration class for an Open-Llama model.

    Defaults match the s-JoL/Open-Llama-V1 checkpoint.  Parameter names are
    reconstructed from the attribute assignments and default values of the
    obfuscated original.
    """

    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # Honor the historically misspelled kwarg if a caller still passes it.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration dict.

        Raises:
            ValueError: if `rope_scaling` is malformed.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
368
'''simple docstring''' def SCREAMING_SNAKE_CASE( __lowercase ) -> Tuple: A: Tuple = len(__lowercase ) for i in range(length - 1 ): A: Dict = i for k in range(i + 1 , __lowercase ): if collection[k] < collection[least]: A: List[str] = k if least != i: A , A: Tuple = (collection[i], collection[least]) return collection if __name__ == "__main__": UpperCamelCase = input('''Enter numbers separated by a comma:\n''').strip() UpperCamelCase = [int(item) for item in user_input.split(''',''')] print(selection_sort(unsorted))
334
0
"""SEW-D model configuration."""

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class lowerCamelCase__(PretrainedConfig):
    """Configuration class for a SEW-D speech model.

    Groups the convolutional feature-extractor settings, the disentangled-
    attention transformer settings, SpecAugment masking parameters and the
    task heads (CTC / sequence classification).

    NOTE(review): parameter names restored — the previous version declared
    every argument as ``a`` (duplicate parameter names, a SyntaxError)
    while the body referenced the real names assigned below.
    """

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Stored as lists so JSON (de)serialization round-trips cleanly.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        # The three conv lists describe the same stack layer by layer, so
        # their lengths must agree.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        """Total downsampling factor of the conv stack (product of strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
232
"""simple docstring""" import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = BertTokenizer __magic_name__ = BertTokenizerFast __magic_name__ = True __magic_name__ = True __magic_name__ = filter_non_english def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: super().setUp() UpperCAmelCase_ : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = "UNwant\u00E9d,running" UpperCAmelCase_ : Any = "unwanted, running" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: UpperCAmelCase_ : Any = self.tokenizer_class(self.vocab_file ) UpperCAmelCase_ : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(lowerCAmelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: if not self.test_rust_tokenizer: return UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : List[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : Union[str, Any] = 
tokenizer.tokenize(lowerCAmelCase_ ) UpperCAmelCase_ : int = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # With lower casing UpperCAmelCase_ : Tuple = self.get_tokenizer(do_lower_case=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : List[Any] = tokenizer.tokenize(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: UpperCAmelCase_ : Optional[Any] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: UpperCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) 
self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def _SCREAMING_SNAKE_CASE ( self : int ) -> int: UpperCAmelCase_ : Any = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: UpperCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? 
" ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: UpperCAmelCase_ : Tuple = BasicTokenizer() UpperCAmelCase_ : Dict = "a\n'll !!to?'d of, can't." UpperCAmelCase_ : List[str] = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."] self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: UpperCAmelCase_ : int = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] UpperCAmelCase_ : Tuple = {} for i, token in enumerate(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[int] = i UpperCAmelCase_ : Optional[Any] = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self.assertTrue(_is_punctuation("-" ) ) 
self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.get_tokenizer() UpperCAmelCase_ : List[str] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("bert-base-uncased" ) UpperCAmelCase_ : Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" UpperCAmelCase_ : Tuple = tokenizer_r.encode_plus( lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ 
, ) UpperCAmelCase_ : Optional[int] = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , "do_lower_case" ) else False UpperCAmelCase_ : List[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = ["的", "人", "有"] UpperCAmelCase_ : Tuple = "".join(lowerCAmelCase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : Any = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) 
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : int = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that only the first Chinese character is not preceded by "##". UpperCAmelCase_ : Tuple = [ f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ ) ] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
268
0
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ : Optional[int] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Any = [ "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FocalNetForImageClassification", "FocalNetForMaskedImageModeling", "FocalNetBackbone", "FocalNetModel", "FocalNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys lowercase__ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
351
"""simple docstring""" import logging from transformers.configuration_utils import PretrainedConfig lowercase__ : List[Any] = logging.getLogger(__name__) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" _SCREAMING_SNAKE_CASE = """masked_bert""" def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_0_5_2_2 , SCREAMING_SNAKE_CASE_ : int=7_6_8 , SCREAMING_SNAKE_CASE_ : Tuple=1_2 , SCREAMING_SNAKE_CASE_ : List[Any]=1_2 , SCREAMING_SNAKE_CASE_ : Tuple=3_0_7_2 , SCREAMING_SNAKE_CASE_ : List[str]="gelu" , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=5_1_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=2 , SCREAMING_SNAKE_CASE_ : Dict=0.02 , SCREAMING_SNAKE_CASE_ : Any=1E-12 , SCREAMING_SNAKE_CASE_ : List[str]=0 , SCREAMING_SNAKE_CASE_ : Optional[int]="topK" , SCREAMING_SNAKE_CASE_ : Optional[int]="constant" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.0 , **SCREAMING_SNAKE_CASE_ : Any , ): super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : List[Any] = vocab_size lowerCAmelCase_ : str = hidden_size lowerCAmelCase_ : Optional[int] = num_hidden_layers lowerCAmelCase_ : Dict = num_attention_heads lowerCAmelCase_ : List[str] = hidden_act lowerCAmelCase_ : List[Any] = intermediate_size lowerCAmelCase_ : Any = hidden_dropout_prob lowerCAmelCase_ : str = attention_probs_dropout_prob lowerCAmelCase_ : Any = max_position_embeddings lowerCAmelCase_ : Dict = type_vocab_size lowerCAmelCase_ : Tuple = initializer_range lowerCAmelCase_ : List[Any] = layer_norm_eps lowerCAmelCase_ : str = pruning_method lowerCAmelCase_ : Optional[Any] = mask_init lowerCAmelCase_ : int = mask_scale
289
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Dict = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class _UpperCAmelCase ( lowercase_ ): '''simple docstring''' lowerCamelCase__ ="""cvt""" def __init__(self , a_=3 , a_=[7, 3, 3] , a_=[4, 2, 2] , a_=[2, 1, 1] , a_=[64, 1_92, 3_84] , a_=[1, 3, 6] , a_=[1, 2, 10] , a_=[4.0, 4.0, 4.0] , a_=[0.0, 0.0, 0.0] , a_=[0.0, 0.0, 0.0] , a_=[0.0, 0.0, 0.1] , a_=[True, True, True] , a_=[False, False, True] , a_=["dw_bn", "dw_bn", "dw_bn"] , a_=[3, 3, 3] , a_=[1, 1, 1] , a_=[2, 2, 2] , a_=[1, 1, 1] , a_=[1, 1, 1] , a_=0.02 , a_=1E-12 , **a_ , ): '''simple docstring''' super().__init__(**a_ ) __snake_case : Optional[Any] = num_channels __snake_case : Dict = patch_sizes __snake_case : List[Any] = patch_stride __snake_case : Optional[int] = patch_padding __snake_case : Any = embed_dim __snake_case : List[Any] = num_heads __snake_case : Tuple = depth __snake_case : Any = mlp_ratio __snake_case : str = attention_drop_rate __snake_case : Dict = drop_rate __snake_case : Optional[Any] = drop_path_rate __snake_case : Tuple = qkv_bias __snake_case : Dict = cls_token __snake_case : int = qkv_projection_method __snake_case : Optional[int] = kernel_qkv __snake_case : Any = padding_kv __snake_case : int = stride_kv __snake_case : List[Any] = padding_q __snake_case : List[Any] = stride_q __snake_case : List[Any] = initializer_range __snake_case : Optional[int] = layer_norm_eps
102
"""Largest number obtainable by deleting exactly one digit."""


def remove_digit(num) -> int:
    """Return the biggest value obtained by removing one digit of ``num``.

    The sign is discarded (``abs``) before the digits are considered, e.g.
    ``remove_digit(-152) == 52``.

    Raises:
        TypeError: if ``num`` is not an ``int``.
    """
    # Bug fix: the previous check was isinstance(x, x), which itself raises
    # TypeError for *any* integer input; the intended test is against int.
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    # One digit-list copy per position, each missing that position's digit.
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


# Backward-compatible alias for the previous (underscore-prefixed) name.
_SCREAMING_SNAKE_CASE = remove_digit

if __name__ == "__main__":
    __import__("doctest").testmod()
2
0
"""simple docstring""" from __future__ import annotations import math def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): if len(__lowerCamelCase ) != 2 or len(a[0] ) != 2 or len(__lowerCamelCase ) != 2 or len(b[0] ) != 2: raise Exception('Matrices are not 2x2' ) __lowerCAmelCase : Tuple = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(__lowerCamelCase ) ) ] def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(__lowerCamelCase ) ) ] def __lowerCAmelCase (_UpperCamelCase ): if len(__lowerCamelCase ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception('Odd matrices are not supported!' 
) __lowerCAmelCase : Optional[int] = len(__lowerCamelCase ) __lowerCAmelCase : int = matrix_length // 2 __lowerCAmelCase : Any = [[a[i][j] for j in range(__lowerCamelCase , __lowerCamelCase )] for i in range(__lowerCamelCase )] __lowerCAmelCase : str = [ [a[i][j] for j in range(__lowerCamelCase , __lowerCamelCase )] for i in range(__lowerCamelCase , __lowerCamelCase ) ] __lowerCAmelCase : List[str] = [[a[i][j] for j in range(__lowerCamelCase )] for i in range(__lowerCamelCase )] __lowerCAmelCase : List[Any] = [[a[i][j] for j in range(__lowerCamelCase )] for i in range(__lowerCamelCase , __lowerCamelCase )] return top_left, top_right, bot_left, bot_right def __lowerCAmelCase (_UpperCamelCase ): return len(__lowerCamelCase ), len(matrix[0] ) def __lowerCAmelCase (_UpperCamelCase ): print('\n'.join(str(__lowerCamelCase ) for line in matrix ) ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): if matrix_dimensions(__lowerCamelCase ) == (2, 2): return default_matrix_multiplication(__lowerCamelCase , __lowerCamelCase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = split_matrix(__lowerCamelCase ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[Any] = split_matrix(__lowerCamelCase ) __lowerCAmelCase : List[str] = actual_strassen(__lowerCamelCase , matrix_subtraction(__lowerCamelCase , __lowerCamelCase ) ) __lowerCAmelCase : str = actual_strassen(matrix_addition(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) __lowerCAmelCase : int = actual_strassen(matrix_addition(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) __lowerCAmelCase : Dict = actual_strassen(__lowerCamelCase , matrix_subtraction(__lowerCamelCase , __lowerCamelCase ) ) __lowerCAmelCase : Optional[int] = actual_strassen(matrix_addition(__lowerCamelCase , __lowerCamelCase ) , matrix_addition(__lowerCamelCase , __lowerCamelCase ) ) __lowerCAmelCase : List[Any] = actual_strassen(matrix_subtraction(__lowerCamelCase , 
__lowerCamelCase ) , matrix_addition(__lowerCamelCase , __lowerCamelCase ) ) __lowerCAmelCase : List[str] = actual_strassen(matrix_subtraction(__lowerCamelCase , __lowerCamelCase ) , matrix_addition(__lowerCamelCase , __lowerCamelCase ) ) __lowerCAmelCase : Dict = matrix_addition(matrix_subtraction(matrix_addition(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) , __lowerCamelCase ) __lowerCAmelCase : List[str] = matrix_addition(__lowerCamelCase , __lowerCamelCase ) __lowerCAmelCase : str = matrix_addition(__lowerCamelCase , __lowerCamelCase ) __lowerCAmelCase : Tuple = matrix_subtraction(matrix_subtraction(matrix_addition(__lowerCamelCase , __lowerCamelCase ) , __lowerCamelCase ) , __lowerCamelCase ) # construct the new matrix from our 4 quadrants __lowerCAmelCase : Tuple = [] for i in range(len(__lowerCamelCase ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(__lowerCamelCase ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): if matrix_dimensions(__lowerCamelCase )[1] != matrix_dimensions(__lowerCamelCase )[0]: __lowerCAmelCase : str = ( 'Unable to multiply these matrices, please check the dimensions.\n' F"Matrix A: {matrixa}\n" F"Matrix B: {matrixa}" ) raise Exception(__lowerCamelCase ) __lowerCAmelCase : Optional[Any] = matrix_dimensions(__lowerCamelCase ) __lowerCAmelCase : Optional[Any] = matrix_dimensions(__lowerCamelCase ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] __lowerCAmelCase : Union[str, Any] = max(*__lowerCamelCase , *__lowerCamelCase ) __lowerCAmelCase : Union[str, Any] = int(math.pow(2 , math.ceil(math.loga(__lowerCamelCase ) ) ) ) __lowerCAmelCase : int = matrixa __lowerCAmelCase : int = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , __lowerCamelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , 
__lowerCamelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , __lowerCamelCase ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) __lowerCAmelCase : List[Any] = actual_strassen(__lowerCamelCase , __lowerCamelCase ) # Removing the additional zeros for i in range(0 , __lowerCamelCase ): if i < dimensiona[0]: for _ in range(dimensiona[1] , __lowerCamelCase ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": lowerCamelCase__ = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] lowerCamelCase__ = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
368
"""simple docstring""" import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , _UpperCamelCase ) __lowerCAmelCase : Union[str, Any] = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: __lowerCAmelCase : Tuple = dataset_size < in_memory_max_size else: __lowerCAmelCase : str = False __lowerCAmelCase : Optional[int] = is_small_dataset(_UpperCamelCase ) assert result == expected
182
0
'''simple docstring'''

import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import CLIPImageProcessor, CLIPProcessor


@require_vision
class UpperCAmelCase_(unittest.TestCase):
    """Tests for CLIPProcessor: the composition of a CLIP tokenizer and a CLIP
    image processor, including save/load round-trips and delegation behavior.

    NOTE(review): in the incoming source every method was named ``lowercase_``
    (so they shadowed each other in the class dict) and every local was
    assigned to ``__lowerCamelCase`` while being *read* under its original
    name, so nothing resolved at runtime.  Names below are restored from the
    read sites (e.g. ``self.get_tokenizer()`` is called, so that helper must
    carry that name; ``setUp``/``tearDown`` follow the unittest lifecycle the
    temp-dir create/remove pattern clearly implements).
    """

    def setUp(self):
        # Write a tiny BPE vocab/merges pair plus an image-processor config
        # into a fresh temp dir so the from_pretrained() calls below can load
        # everything from disk without network access.
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Slow (Python) CLIP tokenizer loaded from the temp dir."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast (Rust-backed) CLIP tokenizer loaded from the temp dir."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """CLIP image processor loaded from the temp dir."""
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random channels-first array converted to a
        channels-last PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        # Round-tripped processors must carry the same vocab and component types.
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        # Extra kwargs at load time must be forwarded to both components.
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        # Processor must delegate image handling to the image processor.
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        # Processor must delegate text handling to the tokenizer.
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # Calling the processor with neither text nor images must raise.
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
185
'''simple docstring'''
# Ideal gas law utilities: PV = nRT  =>  P = nRT / V  and  V = nRT / P.

# Universal (molar) gas constant. Unit - J mol-1 K-1
UNIVERSAL_GAS_CONSTANT = 8.314462

# Backward-compatible alias: the previous revision bound the constant only to
# this name while the functions read UNIVERSAL_GAS_CONSTANT (a NameError).
A__ = UNIVERSAL_GAS_CONSTANT


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure P = nRT / V of an ideal gas, in pascals.

    Raises:
        ValueError: if moles or kelvin is negative, or volume is not
            strictly positive (a zero volume would otherwise surface as a
            ZeroDivisionError despite the "positive value" message).
    """
    if moles < 0 or kelvin < 0 or volume <= 0:
        raise ValueError('Invalid inputs. Enter positive value.')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume V = nRT / P of an ideal gas, in cubic metres.

    Raises:
        ValueError: if moles or kelvin is negative, or pressure is not
            strictly positive.
    """
    if moles < 0 or kelvin < 0 or pressure <= 0:
        raise ValueError('Invalid inputs. Enter positive value.')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


# Backward-compatible alias: both functions previously shared this single
# name, so only the second (volume) implementation was ever reachable.
UpperCAmelCase__ = volume_of_gas_system

if __name__ == "__main__":
    from doctest import testmod

    testmod()
185
1
"""simple docstring"""
# Placeholder ("dummy") objects that stand in for torch-backed API names when
# torch is not installed: any attempt to construct or use them reports the
# missing "torch" backend via `requires_backends`.
#
# NOTE(review): the incoming source defined dozens of classes all bound to the
# single name `__snake_case` and several functions all bound to `__lowercase`;
# each definition shadowed the previous one, so the module namespace ended up
# with exactly one binding of each.  The shadowed duplicates are dead code and
# have been collapsed.  It also used `metaclass=_A` (undefined; the import
# provides `DummyObject`) and called `requires_backends(lowercase__, ...)`
# with an undefined name inside the functions — both fixed below.
from ..utils import DummyObject, requires_backends


class __snake_case(metaclass=DummyObject):
    """Dummy torch-backed class: construction and the two classmethod entry
    points immediately raise via `requires_backends`."""

    # Backend list consulted by the metaclass — presumably DummyObject reads
    # this attribute; verify against ..utils before renaming it.
    _lowerCamelCase = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def UpperCamelCase__(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


def __lowercase(*args, **kwargs):
    """Dummy torch-backed function: always raises via `requires_backends`."""
    requires_backends(__lowercase, ["torch"])
363
"""simple docstring"""


def __lowercase() -> int:
    """Project Euler 40 — Champernowne's constant.

    Concatenate the positive integers to form 0.123456789101112... and
    return d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000, where dn
    is the n-th digit of the fractional part.

    Fixes NameErrors in the previous revision: the loop read and appended an
    undefined `snake_case_` instead of the digit list / counter, and joined
    an undefined name.  The return annotation also said `Tuple` for an int.
    """
    digits: list[str] = []
    number = 1
    # Append decimal representations until at least 1e6 entries exist; the
    # joined string is then comfortably longer than 1e6 characters, so every
    # index used below is valid.
    while len(digits) < 1e6:
        digits.append(str(number))
        number += 1
    constant = "".join(digits)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    # Previous revision called an undefined `solution()` here.
    print(__lowercase())
291
0
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class __lowerCAmelCase : def __init__( self , lowerCAmelCase__ , ) -> Optional[Any]: '''simple docstring''' a__ : Optional[Any] =parent a__ : List[Any] =1_3 a__ : Optional[Any] =7 a__ : List[Any] =3_0 a__ : Optional[int] =self.seq_length + self.mem_len a__ : str =1_5 a__ : Tuple =True a__ : Tuple =True a__ : int =9_9 a__ : Union[str, Any] =[1_0, 5_0, 8_0] a__ : Dict =3_2 a__ : List[Any] =3_2 a__ : Dict =4 a__ : int =8 a__ : Tuple =1_2_8 a__ : Union[str, Any] =2 a__ : Tuple =2 a__ : Any =None a__ : List[str] =1 a__ : Optional[Any] =0 a__ : Any =3 a__ : Any =self.vocab_size - 1 a__ : int =0.01 def _lowercase ( self ) -> List[str]: '''simple docstring''' a__ : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a__ : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a__ : Optional[int] =None if self.use_labels: a__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a__ : Union[str, Any] =TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, 
input_ids_a, lm_labels) def _lowercase ( self ) -> int: '''simple docstring''' random.seed(self.seed ) tf.random.set_seed(self.seed ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: '''simple docstring''' a__ : List[Any] =TFTransfoXLModel(lowerCAmelCase__ ) a__ , a__ : Dict =model(lowerCAmelCase__ ).to_tuple() a__ : List[Any] ={"input_ids": input_ids_a, "mems": mems_a} a__ , a__ : str =model(lowerCAmelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: '''simple docstring''' a__ : Union[str, Any] =TFTransfoXLLMHeadModel(lowerCAmelCase__ ) a__ , a__ : Optional[Any] =model(lowerCAmelCase__ ).to_tuple() a__ : Optional[int] ={"input_ids": input_ids_a, "labels": lm_labels} a__ , a__ : Optional[int] =model(lowerCAmelCase__ ).to_tuple() a__ , a__ : int =model([input_ids_a, mems_a] ).to_tuple() a__ : int ={"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels} a__ , a__ : str =model(lowerCAmelCase__ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, 
self.hidden_size)] * self.num_hidden_layers , ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: '''simple docstring''' a__ : Optional[Any] =TFTransfoXLForSequenceClassification(lowerCAmelCase__ ) a__ : int =model(lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self ) -> int: '''simple docstring''' a__ : int =self.prepare_config_and_inputs() ((a__) , (a__) , (a__) , (a__)) : List[Any] =config_and_inputs a__ : List[str] ={"input_ids": input_ids_a} return config, inputs_dict @require_tf class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase): _lowercase : Union[str, Any] = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) _lowercase : Optional[Any] = () if is_tf_available() else () _lowercase : Optional[int] = ( { """feature-extraction""": TFTransfoXLModel, """text-classification""": TFTransfoXLForSequenceClassification, """text-generation""": TFTransfoXLLMHeadModel, """zero-shot""": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented _lowercase : Dict = False _lowercase : Optional[Any] = False _lowercase : List[str] = False _lowercase : str = False def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]: '''simple docstring''' if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. 
return True return False def _lowercase ( self ) -> List[Any]: '''simple docstring''' a__ : Tuple =TFTransfoXLModelTester(self ) a__ : List[Any] =ConfigTester(self , config_class=lowerCAmelCase__ , d_embed=3_7 ) def _lowercase ( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' self.model_tester.set_seed() a__ : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*lowerCAmelCase__ ) def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' self.model_tester.set_seed() a__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*lowerCAmelCase__ ) def _lowercase ( self ) -> List[str]: '''simple docstring''' a__ : int =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*lowerCAmelCase__ ) def _lowercase ( self ) -> Dict: '''simple docstring''' a__ , a__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common() a__ : str =[TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: a__ : Dict =model_class(lowerCAmelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: a__ : Union[str, Any] =model.get_output_embeddings() assert isinstance(lowerCAmelCase__ , tf.keras.layers.Layer ) a__ : str =model.get_bias() assert name is None else: a__ : Dict =model.get_output_embeddings() assert x is None a__ : Dict =model.get_bias() assert name is None def _lowercase ( self ) -> List[str]: '''simple docstring''' pass @slow def _lowercase ( self ) -> Any: '''simple docstring''' for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a__ : str =TFTransfoXLModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) @unittest.skip(reason="This model 
doesn't play well with fit() due to not returning a single loss." ) def _lowercase ( self ) -> List[str]: '''simple docstring''' pass @require_tf class __lowerCAmelCase ( unittest.TestCase): @unittest.skip("Skip test until #12651 is resolved." ) @slow def _lowercase ( self ) -> Optional[int]: '''simple docstring''' a__ : str =TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" ) # fmt: off a__ : Tuple =tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . 
<eod> </s> <eos> # fmt: off a__ : Union[str, Any] =[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. 
The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> a__ : List[Any] =model.generate(lowerCAmelCase__ , max_length=2_0_0 , do_sample=lowerCAmelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase__ )
95
"""simple docstring""" # This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests SCREAMING_SNAKE_CASE__ = open # noqa: we just need to have a builtin inside this module to test it properly
46
0
import io import json import fsspec import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.json import JsonDatasetReader, JsonDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _A ( snake_case , snake_case ) -> List[Any]: assert isinstance(__lowerCamelCase , __lowerCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def _A ( snake_case , snake_case , snake_case ) -> Tuple: _lowercase : Union[str, Any] = tmp_path / "cache" _lowercase : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _lowercase : str = JsonDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read() _check_json_dataset(__lowerCamelCase , __lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def _A ( snake_case , snake_case , snake_case ) -> int: _lowercase : Optional[Any] = tmp_path / "cache" _lowercase : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} _lowercase : int = features.copy() if features else default_expected_features _lowercase : Optional[Any] = ( Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _lowercase : Any = JsonDatasetReader(__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read() 
_check_json_dataset(__lowerCamelCase , __lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_3": "float64", "col_1": "string", "col_2": "int64"}, ] , ) def _A ( snake_case , snake_case , snake_case ) -> Any: _lowercase : List[str] = tmp_path / "cache" _lowercase : Dict = {"col_3": "float64", "col_1": "string", "col_2": "int64"} _lowercase : Any = features.copy() if features else default_expected_features _lowercase : Dict = ( Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _lowercase : Dict = JsonDatasetReader(__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read() assert isinstance(__lowerCamelCase , __lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_3", "col_1", "col_2"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype def _A ( snake_case , snake_case ) -> int: # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"} _lowercase : str = {"col_2": "int64", "col_3": "float64", "col_1": "string"} _lowercase : str = features.copy() _lowercase : Union[str, Any] = ( Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _lowercase : Any = tmp_path / "cache" _lowercase : str = JsonDatasetReader(__lowerCamelCase , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read() assert isinstance(__lowerCamelCase , __lowerCamelCase ) assert dataset.num_rows == 2 assert dataset.num_columns == 3 assert dataset.column_names == ["col_2", "col_3", "col_1"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def _A ( snake_case , snake_case , snake_case ) -> Any: _lowercase : Dict = tmp_path / 
"cache" _lowercase : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} _lowercase : Dict = JsonDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase , split=__lowerCamelCase ).read() _check_json_dataset(__lowerCamelCase , __lowerCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list] ) def _A ( snake_case , snake_case , snake_case ) -> Optional[Any]: if issubclass(__lowerCamelCase , __lowerCamelCase ): _lowercase : Dict = jsonl_path elif issubclass(__lowerCamelCase , __lowerCamelCase ): _lowercase : List[Any] = [jsonl_path] _lowercase : str = tmp_path / "cache" _lowercase : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} _lowercase : str = JsonDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase ).read() _check_json_dataset(__lowerCamelCase , __lowerCamelCase ) def _A ( snake_case , snake_case , snake_case=("train",) ) -> Dict: assert isinstance(__lowerCamelCase , __lowerCamelCase ) for split in splits: _lowercase : List[str] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True] ) def _A ( snake_case , snake_case , snake_case ) -> Union[str, Any]: _lowercase : Tuple = tmp_path / "cache" _lowercase : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _lowercase : List[str] = JsonDatasetReader({"train": jsonl_path} , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase ).read() _check_json_datasetdict(__lowerCamelCase , __lowerCamelCase ) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", 
"col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def _A ( snake_case , snake_case , snake_case ) -> int: _lowercase : int = tmp_path / "cache" _lowercase : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} _lowercase : Any = features.copy() if features else default_expected_features _lowercase : Union[str, Any] = ( Features({feature: Value(__lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) _lowercase : Optional[Any] = JsonDatasetReader({"train": jsonl_path} , features=__lowerCamelCase , cache_dir=__lowerCamelCase ).read() _check_json_datasetdict(__lowerCamelCase , __lowerCamelCase ) @pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] ) def _A ( snake_case , snake_case , snake_case ) -> List[str]: if split: _lowercase : Dict = {split: jsonl_path} else: _lowercase : Any = "train" _lowercase : Union[str, Any] = {"train": jsonl_path, "test": jsonl_path} _lowercase : Any = tmp_path / "cache" _lowercase : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} _lowercase : Dict = JsonDatasetReader(__lowerCamelCase , cache_dir=__lowerCamelCase ).read() _check_json_datasetdict(__lowerCamelCase , __lowerCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def _A ( snake_case ) -> Any: return json.load(__lowerCamelCase ) def _A ( snake_case ) -> List[str]: return [json.loads(__lowerCamelCase ) for line in buffer] class a__ : @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ ).write() buffer.seek(0 ) _lowercase : Dict = load_json_function(lowercase_ ) assert isinstance(lowercase_ , 
lowercase_ ) assert isinstance(exported_content[0] , lowercase_ ) assert len(lowercase_ ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ ).write() buffer.seek(0 ) _lowercase : Optional[Any] = load_json(lowercase_ ) assert isinstance(lowercase_ , lowercase_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(lowercase_ ) == 10 @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , num_proc=2 ).write() buffer.seek(0 ) _lowercase : Optional[Any] = load_json_function(lowercase_ ) assert isinstance(lowercase_ , lowercase_ ) assert isinstance(exported_content[0] , lowercase_ ) assert len(lowercase_ ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, 
{"schema", "data"}, "data"), ] , ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ , num_proc=2 ).write() buffer.seek(0 ) _lowercase : List[Any] = load_json(lowercase_ ) assert isinstance(lowercase_ , lowercase_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(lowercase_ ) == 10 def _lowerCamelCase ( self , _UpperCamelCase ): """simple docstring""" with pytest.raises(lowercase_ ): with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , num_proc=0 ) @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] ) def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): """simple docstring""" _lowercase : List[Any] = tmp_path_factory.mktemp("data" ) / f'''test.json.{extension}''' _lowercase : str = str(shared_datadir / f'''test_file.json.{extension}''' ) JsonDatasetWriter(lowercase_ , lowercase_ , compression=lowercase_ ).write() with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f: _lowercase : List[str] = f.read() with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f: _lowercase : Dict = f.read() assert exported_content == original_content
351
"""Processor class for OwlViT: bundles an image processor and a CLIP tokenizer."""
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class a__(ProcessorMixin):
    """Wraps an ``OwlViTImageProcessor`` and a CLIP tokenizer into a single
    processor offering the combined functionality of both.

    NOTE(review): the previous version assigned all three class attributes to
    the same name and defined every method under one colliding name, so later
    definitions silently clobbered earlier ones; distinct names restored here.
    """

    # Attribute names required by the ProcessorMixin protocol.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Back-compat: accept the deprecated `feature_extractor` kwarg as an
        # alias for `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize `text` and/or preprocess `images`/`query_images`.

        Returns a `BatchEncoding` whose fields depend on which inputs were
        provided (`input_ids`/`attention_mask`, `pixel_values`,
        `query_pixel_values`).

        Raises:
            ValueError: if none of the three inputs is given, or if
                `return_tensors` names a framework that is unavailable.
            TypeError: if `text` is not a (possibly nested) list of strings.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            # A plain string or a flat list of strings is a single "batch item".
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            # Stack per-item encodings into one batch tensor for the requested
            # framework; frameworks are imported lazily only when requested.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Forward to ``OwlViTImageProcessor.post_process``."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Forward to ``OwlViTImageProcessor.post_process_object_detection``."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Forward to ``OwlViTImageProcessor.post_process_image_guided_detection``."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
199
0
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def A__ ( self ) -> Union[str, Any]: __lowerCAmelCase = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) __lowerCAmelCase = get_activation("""gelu""" ) self.assertTrue(torch.allclose(gelu_python(snake_case_ ) , torch_builtin(snake_case_ ) ) ) self.assertFalse(torch.allclose(gelu_python(snake_case_ ) , gelu_new(snake_case_ ) ) ) def A__ ( self ) -> Any: __lowerCAmelCase = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) __lowerCAmelCase = get_activation("""gelu""" ) __lowerCAmelCase = get_activation("""gelu_10""" ) __lowerCAmelCase = torch_builtin(snake_case_ ) __lowerCAmelCase = geluaa(snake_case_ ) __lowerCAmelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(snake_case_ ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def A__ ( self ) -> str: get_activation("""gelu""" ) get_activation("""gelu_10""" ) get_activation("""gelu_fast""" ) get_activation("""gelu_new""" ) get_activation("""gelu_python""" ) get_activation("""gelu_pytorch_tanh""" ) get_activation("""linear""" ) get_activation("""mish""" ) get_activation("""quick_gelu""" ) get_activation("""relu""" ) get_activation("""sigmoid""" ) get_activation("""silu""" ) get_activation("""swish""" ) get_activation("""tanh""" ) with self.assertRaises(snake_case_ ): get_activation("""bogus""" ) with self.assertRaises(snake_case_ ): get_activation(snake_case_ ) def A__ ( self ) -> Dict: __lowerCAmelCase = get_activation("""gelu""" ) __lowerCAmelCase = 1 __lowerCAmelCase = get_activation("""gelu""" ) self.assertEqual(acta.a , 1 ) with self.assertRaises(snake_case_ ): __lowerCAmelCase = acta.a
301
"""simple docstring""" from math import isqrt, loga def lowercase (_lowerCAmelCase ): __lowerCAmelCase = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , _lowerCAmelCase , _lowerCAmelCase ): __lowerCAmelCase = False return [i for i in range(2 , _lowerCAmelCase ) if is_prime[i]] def lowercase (_lowerCAmelCase = 80_0800 , _lowerCAmelCase = 80_0800 ): __lowerCAmelCase = degree * loga(_lowerCAmelCase ) __lowerCAmelCase = int(_lowerCAmelCase ) __lowerCAmelCase = calculate_prime_numbers(_lowerCAmelCase ) __lowerCAmelCase = 0 __lowerCAmelCase = 0 __lowerCAmelCase = len(_lowerCAmelCase ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F"{solution() = }")
301
1
"""simple docstring""" from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def UpperCAmelCase ( a_ = True, *a_, **a_ ): '''simple docstring''' if not is_tqdm_available(): raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' ) lowerCamelCase : Any = False if main_process_only: lowerCamelCase : Optional[int] = PartialState().local_process_index == 0 return _tqdm(*a_, **a_, disable=a_ )
363
"""simple docstring""" def UpperCAmelCase ( a_, a_ ): '''simple docstring''' while b: lowerCamelCase , lowerCamelCase : Tuple = b, a % b return a def UpperCAmelCase ( a_, a_ ): '''simple docstring''' return a if b == 0 else euclidean_gcd_recursive(a_, a % b ) def UpperCAmelCase ( ): '''simple docstring''' print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3, 5 )}""" ) print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5, 3 )}""" ) print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1, 3 )}""" ) print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3, 6 )}""" ) print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6, 3 )}""" ) print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5 )}""" ) print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3 )}""" ) print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3 )}""" ) print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6 )}""" ) print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3 )}""" ) if __name__ == "__main__": main()
205
0
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _UpperCamelCase : List[Any] = logging.get_logger(__name__) def __UpperCAmelCase ( A : int ) -> Dict: if isinstance(UpperCamelCase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(UpperCamelCase__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(UpperCamelCase__ ): return [[videos]] raise ValueError(F"Could not make batched video from {videos}" ) class snake_case__ ( a__): a_ = ['''pixel_values'''] def __init__( self : Dict , _A : Tuple = True , _A : List[str] = None , _A : Any = PILImageResampling.BILINEAR , _A : List[Any] = True , _A : Optional[int] = None , _A : Dict = True , _A : int = 1 / 2_55 , _A : List[Any] = True , _A : List[str] = True , _A : List[str] = None , _A : Optional[Any] = None , **_A : Optional[Any] , ) -> Optional[Any]: super().__init__(**_lowerCamelCase ) UpperCAmelCase_ : str = size if size is not None else {"""shortest_edge""": 2_56} UpperCAmelCase_ : List[str] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase ) UpperCAmelCase_ : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} UpperCAmelCase_ : List[str] = get_size_dict(_lowerCamelCase , param_name='''crop_size''' ) UpperCAmelCase_ : Optional[int] = do_resize 
UpperCAmelCase_ : Union[str, Any] = size UpperCAmelCase_ : Dict = do_center_crop UpperCAmelCase_ : Tuple = crop_size UpperCAmelCase_ : Dict = resample UpperCAmelCase_ : Optional[int] = do_rescale UpperCAmelCase_ : Union[str, Any] = rescale_factor UpperCAmelCase_ : Optional[Any] = offset UpperCAmelCase_ : Tuple = do_normalize UpperCAmelCase_ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase_ : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def A ( self : Optional[int] , _A : Any , _A : Tuple , _A : Union[str, Any] = PILImageResampling.BILINEAR , _A : List[Any] = None , **_A : Optional[int] , ) -> Dict: UpperCAmelCase_ : Tuple = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase ) if "shortest_edge" in size: UpperCAmelCase_ : Tuple = get_resize_output_image_size(_lowerCamelCase , size['''shortest_edge'''] , default_to_square=_lowerCamelCase ) elif "height" in size and "width" in size: UpperCAmelCase_ : Dict = (size["""height"""], size["""width"""]) else: raise ValueError(F"Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}" ) return resize(_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def A ( self : List[Any] , _A : Dict , _A : Optional[Any] , _A : List[Any] = None , **_A : int , ) -> Union[str, Any]: UpperCAmelCase_ : int = get_size_dict(_lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(F"Size must have \'height\' and \'width\' as keys. 
Got {size.keys()}" ) return center_crop(_lowerCamelCase , size=(size['''height'''], size['''width''']) , data_format=_lowerCamelCase , **_lowerCamelCase ) def A ( self : str , _A : List[Any] , _A : Optional[Any] , _A : Tuple = True , _A : Union[str, Any] = None , **_A : Tuple , ) -> int: UpperCAmelCase_ : Any = image.astype(np.floataa ) if offset: UpperCAmelCase_ : int = image - (scale / 2) return rescale(_lowerCamelCase , scale=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def A ( self : int , _A : Any , _A : List[str] , _A : Optional[Any] , _A : Optional[int] = None , **_A : List[Any] , ) -> List[str]: return normalize(_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def A ( self : Union[str, Any] , _A : Any , _A : List[str] = None , _A : int = None , _A : List[str] = None , _A : Union[str, Any] = None , _A : str = None , _A : Tuple = None , _A : List[Any] = None , _A : Any = None , _A : Union[str, Any] = None , _A : List[Any] = None , _A : Optional[Any] = None , _A : Tuple = ChannelDimension.FIRST , ) -> Optional[Any]: if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) if offset and not do_rescale: raise ValueError('''For offset, do_rescale must also be set to True.''' ) # All transformations expect numpy arrays. 
UpperCAmelCase_ : Tuple = to_numpy_array(_lowerCamelCase ) if do_resize: UpperCAmelCase_ : List[str] = self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) if do_center_crop: UpperCAmelCase_ : Optional[int] = self.center_crop(_lowerCamelCase , size=_lowerCamelCase ) if do_rescale: UpperCAmelCase_ : List[str] = self.rescale(image=_lowerCamelCase , scale=_lowerCamelCase , offset=_lowerCamelCase ) if do_normalize: UpperCAmelCase_ : Optional[int] = self.normalize(image=_lowerCamelCase , mean=_lowerCamelCase , std=_lowerCamelCase ) UpperCAmelCase_ : Optional[Any] = to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) return image def A ( self : Optional[Any] , _A : Any , _A : Dict = None , _A : int = None , _A : Tuple = None , _A : int = None , _A : Any = None , _A : Any = None , _A : Any = None , _A : Union[str, Any] = None , _A : int = None , _A : int = None , _A : Dict = None , _A : List[str] = None , _A : Any = ChannelDimension.FIRST , **_A : Optional[int] , ) -> List[str]: UpperCAmelCase_ : int = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : Union[str, Any] = resample if resample is not None else self.resample UpperCAmelCase_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ : int = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ : int = offset if offset is not None else self.offset UpperCAmelCase_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ : str = image_std if image_std is not None else self.image_std UpperCAmelCase_ : List[Any] = size if size is not None else self.size UpperCAmelCase_ : Union[str, Any] = get_size_dict(_lowerCamelCase , default_to_square=_lowerCamelCase ) UpperCAmelCase_ : 
Tuple = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ : Optional[Any] = get_size_dict(_lowerCamelCase , param_name='''crop_size''' ) if not valid_images(_lowerCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) UpperCAmelCase_ : Union[str, Any] = make_batched(_lowerCamelCase ) UpperCAmelCase_ : Tuple = [ [ self._preprocess_image( image=_lowerCamelCase , do_resize=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase , do_center_crop=_lowerCamelCase , crop_size=_lowerCamelCase , do_rescale=_lowerCamelCase , rescale_factor=_lowerCamelCase , offset=_lowerCamelCase , do_normalize=_lowerCamelCase , image_mean=_lowerCamelCase , image_std=_lowerCamelCase , data_format=_lowerCamelCase , ) for img in video ] for video in videos ] UpperCAmelCase_ : Tuple = {"""pixel_values""": videos} return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
304
'''simple docstring''' import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) __A =logging.getLogger() def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ ): UpperCAmelCase__ : Union[str, Any] = """\n""".join(UpperCamelCase__ ) Path(UpperCamelCase__ ).open("""w""" ).writelines(UpperCamelCase__ ) __A ='patrickvonplaten/t5-tiny-random' __A ='sshleifer/bart-tiny-random' __A ='sshleifer/tiny-mbart' __A =logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class _snake_case ( a__ ): def snake_case__ ( self , _lowerCamelCase): UpperCAmelCase__ : Any = Path(self.get_auto_remove_tmp_dir()) / """utest_input.source""" UpperCAmelCase__ : Dict = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() UpperCAmelCase__ : Any = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""] _dump_articles(_lowerCamelCase , _lowerCamelCase) UpperCAmelCase__ : Optional[Any] = str(Path(self.get_auto_remove_tmp_dir()) / """scores.json""") UpperCAmelCase__ : int = """translation_en_to_de""" if model == T5_TINY else """summarization""" UpperCAmelCase__ : Union[str, Any] = f''' run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 '''.split() with patch.object(_lowerCamelCase , """argv""" , _lowerCamelCase): run_generate() assert Path(_lowerCamelCase).exists() # os.remove(Path(output_file_name)) def snake_case__ ( self): self.run_eval_tester(_lowerCamelCase) @parameterized.expand([BART_TINY, MBART_TINY]) @slow def snake_case__ ( self , 
_lowerCamelCase): self.run_eval_tester(_lowerCamelCase) @parameterized.expand([T5_TINY, MBART_TINY]) @slow def snake_case__ ( self , _lowerCamelCase): UpperCAmelCase__ : Optional[Any] = Path(self.get_auto_remove_tmp_dir()) / """utest_input.source""" UpperCAmelCase__ : List[str] = input_file_name.parent / """utest_output.txt""" assert not output_file_name.exists() UpperCAmelCase__ : int = { """en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""], """de""": [ """Maschinelles Lernen ist großartig, oder?""", """Ich esse gerne Bananen""", """Morgen ist wieder ein toller Tag!""", ], } UpperCAmelCase__ : int = Path(self.get_auto_remove_tmp_dir()) UpperCAmelCase__ : Any = str(tmp_dir / """scores.json""") UpperCAmelCase__ : List[str] = str(tmp_dir / """val.target""") _dump_articles(_lowerCamelCase , text["""en"""]) _dump_articles(_lowerCamelCase , text["""de"""]) UpperCAmelCase__ : int = """translation_en_to_de""" if model == T5_TINY else """summarization""" UpperCAmelCase__ : List[Any] = f''' run_eval_search.py {model} {str(_lowerCamelCase)} {str(_lowerCamelCase)} --score_path {score_path} --reference_path {reference_path} --task {task} '''.split() testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""]) with patch.object(_lowerCamelCase , """argv""" , _lowerCamelCase): with CaptureStdout() as cs: run_search() UpperCAmelCase__ : Optional[Any] = [""" num_beams | length_penalty""", model, """Best score args"""] UpperCAmelCase__ : Any = ["""Info"""] if "translation" in task: expected_strings.append("""bleu""") else: expected_strings.extend(_lowerCamelCase) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(_lowerCamelCase).exists() os.remove(Path(_lowerCamelCase))
163
0
"""simple docstring""" lowerCAmelCase : Dict = 256 # Modulus to hash a string lowerCAmelCase : Optional[Any] = 100_0003 def a__ ( snake_case__ , snake_case__ ) -> bool: lowerCamelCase = len(snake_case__ ) lowerCamelCase = len(snake_case__ ) if p_len > t_len: return False lowerCamelCase = 0 lowerCamelCase = 0 lowerCamelCase = 1 # Calculating the hash of pattern and substring of text for i in range(snake_case__ ): lowerCamelCase = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus lowerCamelCase = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue lowerCamelCase = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash lowerCamelCase = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def a__ ( ) -> None: lowerCamelCase = """abc1abc12""" lowerCamelCase = """alskfjaldsabc1abc1abc12k23adsfabcabc""" lowerCamelCase = """alskfjaldsk23adsfabcabc""" assert rabin_karp(snake_case__ , snake_case__ ) and not rabin_karp(snake_case__ , snake_case__ ) # Test 2) lowerCamelCase = """ABABX""" lowerCamelCase = """ABABZABABYABABX""" assert rabin_karp(snake_case__ , snake_case__ ) # Test 3) lowerCamelCase = """AAAB""" lowerCamelCase = """ABAAAAAB""" assert rabin_karp(snake_case__ , snake_case__ ) # Test 4) lowerCamelCase = """abcdabcy""" lowerCamelCase = """abcxabcdabxabcdabcdabcy""" assert rabin_karp(snake_case__ , snake_case__ ) # Test 5) lowerCamelCase = """Lü""" lowerCamelCase = """Lüsai""" assert rabin_karp(snake_case__ , snake_case__ ) lowerCamelCase = """Lue""" assert not rabin_karp(snake_case__ , snake_case__ ) print("""Success.""" ) if __name__ == "__main__": test_rabin_karp()
168
"""simple docstring""" import math import random def a__ ( snake_case__ , snake_case__ = False ) -> float: if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value lowerCAmelCase : Dict = 0.0_2 def a__ ( snake_case__ , snake_case__ ) -> float: lowerCamelCase = float(2 * (random.randint(1 , 1_00 )) - 1 ) for _ in range(snake_case__ ): # Forward propagation lowerCamelCase = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? lowerCamelCase = (expected / 1_00) - layer_a # Error delta lowerCamelCase = layer_1_error * sigmoid_function(snake_case__ , snake_case__ ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 1_00 if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase : Any = int(input("""Expected value: """)) lowerCAmelCase : List[Any] = int(input("""Number of propagations: """)) print(forward_propagation(expected, number_propagations))
168
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary # Register SEW's fairseq modules from sew_asapp import tasks # noqa: F401 from transformers import ( SEWConfig, SEWForCTC, SEWModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __lowerCamelCase : Dict = logging.get_logger(__name__) __lowerCamelCase : Optional[int] = { '''post_extract_proj''': '''feature_projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.upsample.0''': '''encoder.upsample.projection''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''layer_norm''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : str , __UpperCamelCase : str ) -> str: """simple docstring""" for attribute in key.split(""".""" ): SCREAMING_SNAKE_CASE__ = getattr(_A , _A ) if weight_type is not None: SCREAMING_SNAKE_CASE__ = getattr(_A , _A ).shape else: SCREAMING_SNAKE_CASE__ = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": SCREAMING_SNAKE_CASE__ = value elif weight_type == "weight_g": SCREAMING_SNAKE_CASE__ = value elif weight_type == "weight_v": SCREAMING_SNAKE_CASE__ = value elif weight_type == "bias": SCREAMING_SNAKE_CASE__ = value else: SCREAMING_SNAKE_CASE__ = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = fairseq_model.state_dict() SCREAMING_SNAKE_CASE__ = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): SCREAMING_SNAKE_CASE__ = False if "conv_layers" in name: load_conv_layer( _A , _A , _A , _A , hf_model.config.feat_extract_norm == """group""" , ) SCREAMING_SNAKE_CASE__ = True else: for key, mapped_key in MAPPING.items(): SCREAMING_SNAKE_CASE__ = """sew.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: SCREAMING_SNAKE_CASE__ = True if "*" in mapped_key: SCREAMING_SNAKE_CASE__ = name.split(_A )[0].split(""".""" )[-2] SCREAMING_SNAKE_CASE__ = mapped_key.replace("""*""" , _A ) if "weight_g" in name: SCREAMING_SNAKE_CASE__ = """weight_g""" elif "weight_v" in name: SCREAMING_SNAKE_CASE__ = """weight_v""" elif "weight" in name: SCREAMING_SNAKE_CASE__ = """weight""" elif "bias" in name: SCREAMING_SNAKE_CASE__ = """bias""" else: SCREAMING_SNAKE_CASE__ = None set_recursively(_A , _A , _A , _A , _A ) continue if not is_used: unused_weights.append(_A ) logger.warning(f"""Unused weights: {unused_weights}""" ) def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : str , 
__UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = full_name.split("""conv_layers.""" )[-1] SCREAMING_SNAKE_CASE__ = name.split(""".""" ) SCREAMING_SNAKE_CASE__ = int(items[0] ) SCREAMING_SNAKE_CASE__ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) SCREAMING_SNAKE_CASE__ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) SCREAMING_SNAKE_CASE__ = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) SCREAMING_SNAKE_CASE__ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) SCREAMING_SNAKE_CASE__ = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_A ) def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : Any ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ = SEWConfig() if is_finetuned: SCREAMING_SNAKE_CASE__ = model.wav_encoder.wav_model.cfg else: SCREAMING_SNAKE_CASE__ = model.cfg SCREAMING_SNAKE_CASE__ = fs_config.conv_bias SCREAMING_SNAKE_CASE__ = eval(fs_config.conv_feature_layers ) SCREAMING_SNAKE_CASE__ = [x[0] for x in conv_layers] SCREAMING_SNAKE_CASE__ = [x[1] for x in conv_layers] SCREAMING_SNAKE_CASE__ = [x[2] for x in conv_layers] SCREAMING_SNAKE_CASE__ = """gelu""" SCREAMING_SNAKE_CASE__ = """layer""" if fs_config.extractor_mode == """layer_norm""" else """group""" SCREAMING_SNAKE_CASE__ = 0.0 SCREAMING_SNAKE_CASE__ = fs_config.activation_fn.name SCREAMING_SNAKE_CASE__ = fs_config.encoder_embed_dim SCREAMING_SNAKE_CASE__ = 0.02 SCREAMING_SNAKE_CASE__ = fs_config.encoder_ffn_embed_dim SCREAMING_SNAKE_CASE__ = 1E-5 SCREAMING_SNAKE_CASE__ = fs_config.encoder_layerdrop SCREAMING_SNAKE_CASE__ = fs_config.encoder_attention_heads SCREAMING_SNAKE_CASE__ = fs_config.conv_pos_groups SCREAMING_SNAKE_CASE__ = fs_config.conv_pos SCREAMING_SNAKE_CASE__ = len(_A ) SCREAMING_SNAKE_CASE__ = fs_config.encoder_layers SCREAMING_SNAKE_CASE__ = fs_config.squeeze_factor # take care of any params that are overridden by the Wav2VecCtc model if is_finetuned: SCREAMING_SNAKE_CASE__ = model.cfg SCREAMING_SNAKE_CASE__ = fs_config.final_dropout 
SCREAMING_SNAKE_CASE__ = fs_config.layerdrop SCREAMING_SNAKE_CASE__ = fs_config.activation_dropout SCREAMING_SNAKE_CASE__ = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0 SCREAMING_SNAKE_CASE__ = fs_config.attention_dropout SCREAMING_SNAKE_CASE__ = fs_config.dropout_input SCREAMING_SNAKE_CASE__ = fs_config.dropout SCREAMING_SNAKE_CASE__ = fs_config.mask_channel_length SCREAMING_SNAKE_CASE__ = fs_config.mask_channel_prob SCREAMING_SNAKE_CASE__ = fs_config.mask_length SCREAMING_SNAKE_CASE__ = fs_config.mask_prob SCREAMING_SNAKE_CASE__ = """Wav2Vec2FeatureExtractor""" SCREAMING_SNAKE_CASE__ = """Wav2Vec2CTCTokenizer""" return config @torch.no_grad() def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Optional[Any]=True ) -> Dict: """simple docstring""" if is_finetuned: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} ) else: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) if config_path is not None: SCREAMING_SNAKE_CASE__ = SEWConfig.from_pretrained(_A ) else: SCREAMING_SNAKE_CASE__ = convert_config(model[0] , _A ) SCREAMING_SNAKE_CASE__ = model[0].eval() SCREAMING_SNAKE_CASE__ = True if config.feat_extract_norm == """layer""" else False SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_A , return_attention_mask=_A , ) if is_finetuned: if dict_path: SCREAMING_SNAKE_CASE__ = Dictionary.load(_A ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq SCREAMING_SNAKE_CASE__ = target_dict.pad_index SCREAMING_SNAKE_CASE__ = target_dict.bos_index 
SCREAMING_SNAKE_CASE__ = target_dict.pad_index SCREAMING_SNAKE_CASE__ = target_dict.bos_index SCREAMING_SNAKE_CASE__ = target_dict.eos_index SCREAMING_SNAKE_CASE__ = len(target_dict.symbols ) SCREAMING_SNAKE_CASE__ = os.path.join(_A , """vocab.json""" ) if not os.path.isdir(_A ): logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_A ) ) return os.makedirs(_A , exist_ok=_A ) with open(_A , """w""" , encoding="""utf-8""" ) as vocab_handle: json.dump(target_dict.indices , _A ) SCREAMING_SNAKE_CASE__ = WavaVecaCTCTokenizer( _A , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_A , ) SCREAMING_SNAKE_CASE__ = WavaVecaProcessor(feature_extractor=_A , tokenizer=_A ) processor.save_pretrained(_A ) SCREAMING_SNAKE_CASE__ = SEWForCTC(_A ) else: SCREAMING_SNAKE_CASE__ = SEWModel(_A ) feature_extractor.save_pretrained(_A ) recursively_load_weights(_A , _A , _A ) hf_model.save_pretrained(_A ) if __name__ == "__main__": __lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--is_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) __lowerCamelCase : Any = parser.parse_args() convert_sew_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned )
219
from typing import Dict, List, Optional

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

# Model max input sizes (in codepoints) for published checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints".
UNICODE_VOCAB_SIZE = 1114112

# Below: constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoint values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}


class A(PreTrainedTokenizer):
    """Character-level CANINE tokenizer: every Unicode codepoint is its own token id.

    Fixes relative to the previous revision:
      * module constants were all bound to the single name ``_A`` (each
        assignment overwrote the last) while the class referenced
        ``SPECIAL_CODEPOINTS`` / ``UNICODE_VOCAB_SIZE`` which were never
        defined -- the constants now carry their real names;
      * ``__init__`` default values were ``chr(UpperCamelCase__)``, which
        references the very parameter being defined and raises NameError when
        the class body executes -- the canonical special-codepoint defaults
        are restored;
      * every method shared the name ``SCREAMING_SNAKE_CASE__``, so each
        definition shadowed the previous one; the canonical
        ``PreTrainedTokenizer`` hook names are restored so the base-class
        machinery can dispatch to them;
      * the base class was the undefined name ``__UpperCAmelCase``.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        """One id per Unicode codepoint."""
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize by splitting the text into individual characters."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """A character's id is simply its Unicode codepoint."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id back to its character; special ids map to their names."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return 1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """0 for the first sequence (incl. its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """CANINE has no vocabulary file to write."""
        return ()
278
0
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class a_(Pipeline):
    """Zero-shot image classification: score an image against free-form candidate labels.

    Fixes relative to the previous revision:
      * methods declared duplicate parameter names (``lowercase`` twice -- a
        SyntaxError) and their bodies referenced the undefined name
        ``__lowerCAmelCase``;
      * all pipeline hooks shared one name so only the last survived; the
        ``Pipeline`` hook names (``_sanitize_parameters`` / ``preprocess`` /
        ``_forward`` / ``postprocess``) are restored;
      * the sort key was ``lambda lowercase: -x[0]`` (NameError on ``x``);
      * the ``@add_end_docstrings`` argument and base class were the undefined
        name ``_a``; restored to ``PIPELINE_INIT_ARGS`` / ``Pipeline``.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        """Classify the image(s) against ``candidate_labels`` passed in kwargs."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Only candidate_labels / hypothesis_template are preprocess-time knobs.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        # One textual hypothesis per candidate label.
        sequences = [hypothesis_template.format(label) for label in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        # Highest-probability label first.
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
371
import copy
from typing import Dict, Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig


MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)


class a_(PretrainedConfig):
    """Configuration for a MaskFormer model (backbone + DETR-style decoder).

    Fixes relative to the previous revision:
      * the base class was the undefined name ``_lowerCAmelCase``; restored to
        ``PretrainedConfig`` (imported above but previously unused);
      * the four class attributes were all bound to ``__A`` (each overwriting
        the last); canonical names restored;
      * ``__init__`` assigned every constructor argument to the throwaway
        local ``lowercase_`` instead of ``self``, so ``to_dict`` and any code
        reading ``self.backbone_config`` / ``self.decoder_config`` failed.
    """

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        # Mirror decoder attention geometry at the top level for model code.
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiate from an existing backbone config and DETR decoder config."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict:
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
147
0
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
    CenterCrop,
    Compose,
    Normalize,
    RandomHorizontalFlip,
    RandomResizedCrop,
    Resize,
    ToTensor,
)

import transformers
from transformers import (
    MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForImageClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

# Fixes: these two constants previously shared one obfuscated name, and the
# second referenced the never-defined MODEL_CONFIG_CLASSES.
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    """Load an image file from *path* and return it converted to RGB."""
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Fix: this hook was previously named `lowerCamelCase_`, so dataclasses
        # never invoked it and the validation below was dead code.
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )


def collate_fn(examples):
    """Stack a list of dataset examples into a model-ready batch."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}


def main():
    """Fine-tune an image-classification model.

    Fixes relative to the previous revision: the three top-level functions all
    shared the name ``_a`` while the ``__main__`` guard called ``main()``
    (NameError); ``compute_metrics`` read ``p`` while its parameter was named
    differently (NameError at first evaluation); the batch transforms discarded
    their result into a local instead of assigning ``pixel_values``; the
    Trainer call and label maps used mangled, partly undefined names.
    """
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        """Computes accuracy on a batch of predictions."""
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initalize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
110
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt') lowerCAmelCase = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" with open(SCREAMING_SNAKE_CASE , '''rb''' ) as f: lowercase__ = Image.open(SCREAMING_SNAKE_CASE ) return im.convert('''RGB''' ) @dataclass class _a : _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={ '''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).''' } , ) _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) _lowercase : Optional[str] = field(default=UpperCamelCase__ , metadata={'''help''': '''A folder containing the training data.'''} ) _lowercase : Optional[str] = field(default=UpperCamelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} ) 
_lowercase : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) _lowercase : Optional[int] = field( default=UpperCamelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) _lowercase : Optional[int] = field( default=UpperCamelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCamelCase_ ( self: Any ) -> int: """simple docstring""" if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( '''You must specify either a dataset name from the hub or a train and/or validation directory.''' ) @dataclass class _a : _lowercase : str = field( default='''google/vit-base-patch16-224-in21k''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , ) _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(UpperCamelCase__ )} , ) _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) _lowercase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) _lowercase : str = field(default=UpperCamelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) _lowercase : bool = field( default=UpperCamelCase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use 
this script ''' '''with private models).''' ) } , ) _lowercase : bool = field( default=UpperCamelCase__ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = torch.stack([example['''pixel_values'''] for example in examples] ) lowercase__ = torch.tensor([example['''labels'''] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def _a ( ): """simple docstring""" lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_image_classification''' , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() lowercase__ = training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. lowercase__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. 
if data_args.dataset_name is not None: lowercase__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='''image-classification''' , use_auth_token=True if model_args.use_auth_token else None , ) else: lowercase__ = {} if data_args.train_dir is not None: lowercase__ = os.path.join(data_args.train_dir , '''**''' ) if data_args.validation_dir is not None: lowercase__ = os.path.join(data_args.validation_dir , '''**''' ) lowercase__ = load_dataset( '''imagefolder''' , data_files=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , task='''image-classification''' , ) # If we don't have a validation split, split off a percentage of train as validation. lowercase__ = None if '''validation''' in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0: lowercase__ = dataset['''train'''].train_test_split(data_args.train_val_split ) lowercase__ = split['''train'''] lowercase__ = split['''test'''] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. lowercase__ = dataset['''train'''].features['''labels'''].names lowercase__ , lowercase__ = {}, {} for i, label in enumerate(SCREAMING_SNAKE_CASE ): lowercase__ = str(SCREAMING_SNAKE_CASE ) lowercase__ = label # Load the accuracy metric from the datasets package lowercase__ = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. 
def compute_metrics(SCREAMING_SNAKE_CASE ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) lowercase__ = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE ) , labelaid=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , finetuning_task='''image-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) lowercase__ = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) lowercase__ = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. 
if "shortest_edge" in image_processor.size: lowercase__ = image_processor.size['''shortest_edge'''] else: lowercase__ = (image_processor.size['''height'''], image_processor.size['''width''']) lowercase__ = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) lowercase__ = Compose( [ RandomResizedCrop(SCREAMING_SNAKE_CASE ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) lowercase__ = Compose( [ Resize(SCREAMING_SNAKE_CASE ), CenterCrop(SCREAMING_SNAKE_CASE ), ToTensor(), normalize, ] ) def train_transforms(SCREAMING_SNAKE_CASE ): lowercase__ = [ _train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image'''] ] return example_batch def val_transforms(SCREAMING_SNAKE_CASE ): lowercase__ = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: lowercase__ = ( dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(SCREAMING_SNAKE_CASE ) if training_args.do_eval: if "validation" not in dataset: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: lowercase__ = ( dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(SCREAMING_SNAKE_CASE ) # Initalize our trainer lowercase__ = Trainer( model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , train_dataset=dataset['''train'''] if training_args.do_train else None , eval_dataset=dataset['''validation'''] if training_args.do_eval else None , compute_metrics=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , ) # Training if 
training_args.do_train: lowercase__ = None if training_args.resume_from_checkpoint is not None: lowercase__ = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase__ = last_checkpoint lowercase__ = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowercase__ = trainer.evaluate() trainer.log_metrics('''eval''' , SCREAMING_SNAKE_CASE ) trainer.save_metrics('''eval''' , SCREAMING_SNAKE_CASE ) # Write model card and (optionally) push to hub lowercase__ = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''image-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''image-classification''', '''vision'''], } if training_args.push_to_hub: trainer.push_to_hub(**SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
110
1
"""simple docstring""" from typing import TYPE_CHECKING from ..utils import _LazyModule __A = { 'config': [ 'EXTERNAL_DATA_FORMAT_SIZE_LIMIT', 'OnnxConfig', 'OnnxConfigWithPast', 'OnnxSeq2SeqConfigWithPast', 'PatchingSpec', ], 'convert': ['export', 'validate_model_outputs'], 'features': ['FeaturesManager'], 'utils': ['ParameterFormat', 'compute_serialized_parameters_size'], } if TYPE_CHECKING: from .config import ( EXTERNAL_DATA_FORMAT_SIZE_LIMIT, OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast, PatchingSpec, ) from .convert import export, validate_model_outputs from .features import FeaturesManager from .utils import ParameterFormat, compute_serialized_parameters_size else: import sys __A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
341
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =1 _lowerCAmelCase =3 _lowerCAmelCase =(32, 32) _lowerCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCAmelCase ) return image @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =UNetaDConditionModel( block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__UpperCAmelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , ) return model @property def _lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _lowerCAmelCase =AutoencoderKL( block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) return model @property def _lowerCAmelCase ( self ) -> Optional[Any]: 
torch.manual_seed(0 ) _lowerCAmelCase =CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , ) return CLIPTextModel(__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__UpperCAmelCase , )[0] _lowerCAmelCase =image[0, -3:, -3:, 
-1] _lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1] _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) _lowerCAmelCase =np.array([0.3_1_1_3, 0.3_9_1_0, 0.4_2_7_2, 0.4_8_5_9, 0.5_0_6_1, 0.4_6_5_2, 0.5_3_6_2, 0.5_7_1_5, 0.5_6_6_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 _lowerCAmelCase =torch.Generator(device=__UpperCAmelCase ).manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , 
output_type="""np""" , ) _lowerCAmelCase =output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.dummy_cond_unet_upscale _lowerCAmelCase =DDPMScheduler() _lowerCAmelCase =DDIMScheduler(prediction_type="""v_prediction""" ) _lowerCAmelCase =self.dummy_vae _lowerCAmelCase =self.dummy_text_encoder _lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _lowerCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] _lowerCAmelCase =Image.fromarray(np.uinta(__UpperCAmelCase ) ).convert("""RGB""" ).resize((64, 64) ) # put models in fp16, except vae as it overflows in fp16 _lowerCAmelCase =unet.half() _lowerCAmelCase =text_encoder.half() # make sure here that pndm scheduler skips prk _lowerCAmelCase =StableDiffusionUpscalePipeline( unet=__UpperCAmelCase , low_res_scheduler=__UpperCAmelCase , scheduler=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , max_noise_level=3_50 , ) _lowerCAmelCase =sd_pipe.to(__UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase ) _lowerCAmelCase ="""A painting of a squirrel eating a burger""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =sd_pipe( [prompt] , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ).images _lowerCAmelCase =low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' def _lowerCAmelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) 
_lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 1e-3 def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale""" """/upsampled_cat_fp16.npy""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="""np""" , ) _lowerCAmelCase =output.images[0] assert image.shape == (5_12, 5_12, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowerCAmelCase ( self ) -> Optional[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _lowerCAmelCase =load_image( 
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/sd2-upscale/low_res_cat.png""" ) _lowerCAmelCase ="""stabilityai/stable-diffusion-x4-upscaler""" _lowerCAmelCase =StableDiffusionUpscalePipeline.from_pretrained( __UpperCAmelCase , torch_dtype=torch.floataa , ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _lowerCAmelCase ="""a cat sitting on a park bench""" _lowerCAmelCase =torch.manual_seed(0 ) _lowerCAmelCase =pipe( prompt=__UpperCAmelCase , image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , output_type="""np""" , ) _lowerCAmelCase =torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9
341
1
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCAmelCase__ ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __init__( self : Optional[Any] , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Dict) ->Dict: '''simple docstring''' super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__) self.check_model_type(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : int=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : int=None , **UpperCAmelCase__ : Any) ->str: '''simple docstring''' A__ , A__ = {}, {} if padding is not None: A__ = padding if truncation is not None: A__ = truncation if top_k is not None: A__ = top_k return preprocess_params, {}, postprocess_params def __call__( self : Dict , UpperCAmelCase__ : Union["Image.Image", str] , UpperCAmelCase__ : str = None , **UpperCAmelCase__ : List[Any]) ->List[Any]: '''simple docstring''' if isinstance(UpperCAmelCase__ , (Image.Image, str)) and isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = {'''image''': image, '''question''': question} else: A__ = image A__ = super().__call__(UpperCAmelCase__ , **UpperCAmelCase__) return results def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Tuple=False) ->Optional[Any]: '''simple docstring''' A__ = load_image(inputs['''image''']) A__ = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__) A__ = self.image_processor(images=UpperCAmelCase__ , 
return_tensors=self.framework) model_inputs.update(UpperCAmelCase__) return model_inputs def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : List[Any]) ->str: '''simple docstring''' A__ = self.model(**UpperCAmelCase__) return model_outputs def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int]=5) ->List[Any]: '''simple docstring''' if top_k > self.model.config.num_labels: A__ = self.model.config.num_labels if self.framework == "pt": A__ = model_outputs.logits.sigmoid()[0] A__ , A__ = probs.topk(UpperCAmelCase__) else: raise ValueError(f"""Unsupported framework: {self.framework}""") A__ = scores.tolist() A__ = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCAmelCase__ , UpperCAmelCase__)]
14
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch _lowerCamelCase : List[Any] = """sshleifer/bart-tiny-random""" _lowerCamelCase : List[Any] = """patrickvonplaten/t5-tiny-random""" @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->int: '''simple docstring''' return AutoConfig.from_pretrained(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1) self.assertEqual(student.config.num_hidden_layers , 1) def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__) self.assertEqual(student.config.encoder_layers , 1) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers) def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' A__ , *A__ = create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1) self.assertEqual(student.config.encoder_layers , 1) self.assertEqual(student.config.decoder_layers , 1) def SCREAMING_SNAKE_CASE ( self : str) ->List[Any]: '''simple docstring''' with self.assertRaises(UpperCAmelCase__): create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=UpperCAmelCase__ , d=UpperCAmelCase__)
14
1
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch __UpperCAmelCase = random.Random() def snake_case_ (__A : Tuple , __A : str=1.0 , __A : Dict=None , __A : Any=None ) -> List[str]: if rng is None: __lowerCAmelCase : List[str] = global_rng __lowerCAmelCase : Union[str, Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __init__( self : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any]=7 , lowerCAmelCase : List[Any]=4_00 , lowerCAmelCase : List[str]=20_00 , lowerCAmelCase : List[str]=10 , lowerCAmelCase : List[str]=1_60 , lowerCAmelCase : Any=8 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Optional[Any]=40_00 , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[str]=True , ) -> Any: """simple docstring""" __lowerCAmelCase : List[str] = parent __lowerCAmelCase : int = batch_size __lowerCAmelCase : Any = min_seq_length __lowerCAmelCase : Optional[int] = max_seq_length __lowerCAmelCase : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __lowerCAmelCase : Dict = padding_value __lowerCAmelCase : List[str] = sampling_rate __lowerCAmelCase : str = return_attention_mask __lowerCAmelCase : Optional[int] = do_normalize __lowerCAmelCase : Optional[Any] = feature_size __lowerCAmelCase : Tuple = chunk_length 
__lowerCAmelCase : Dict = hop_length def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: """simple docstring""" return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase : str=False , lowerCAmelCase : List[str]=False ) -> Dict: """simple docstring""" def _flatten(lowerCAmelCase : Tuple ): return list(itertools.chain(*lowerCAmelCase ) ) if equal_length: __lowerCAmelCase : str = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size __lowerCAmelCase : Union[str, Any] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __lowerCAmelCase : Dict = [np.asarray(lowerCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ): """simple docstring""" lowerCamelCase : List[str] =WhisperFeatureExtractor if is_speech_available() else None def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: """simple docstring""" __lowerCAmelCase : str = WhisperFeatureExtractionTester(self ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase : List[Any] = feat_extract_first.save_pretrained(lowerCAmelCase )[0] check_json_file_has_correct_format(lowerCAmelCase ) __lowerCAmelCase : Optional[int] = self.feature_extraction_class.from_pretrained(lowerCAmelCase ) __lowerCAmelCase : Tuple = feat_extract_first.to_dict() __lowerCAmelCase : Union[str, Any] = feat_extract_second.to_dict() 
__lowerCAmelCase : Union[str, Any] = feat_extract_first.mel_filters __lowerCAmelCase : List[Any] = feat_extract_second.mel_filters self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) ) self.assertEqual(lowerCAmelCase , lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: """simple docstring""" __lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase : Any = os.path.join(lowerCAmelCase , """feat_extract.json""" ) feat_extract_first.to_json_file(lowerCAmelCase ) __lowerCAmelCase : Any = self.feature_extraction_class.from_json_file(lowerCAmelCase ) __lowerCAmelCase : Dict = feat_extract_first.to_dict() __lowerCAmelCase : Any = feat_extract_second.to_dict() __lowerCAmelCase : List[Any] = feat_extract_first.mel_filters __lowerCAmelCase : Union[str, Any] = feat_extract_second.mel_filters self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase ) ) self.assertEqual(lowerCAmelCase , lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __lowerCAmelCase : Dict = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] __lowerCAmelCase : Dict = [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs] # Test feature size __lowerCAmelCase : Dict = feature_extractor(lowerCAmelCase , padding="""max_length""" , return_tensors="""np""" ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input __lowerCAmelCase : Tuple = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features __lowerCAmelCase : Union[str, Any] = 
feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) ) # Test batched __lowerCAmelCase : List[str] = feature_extractor(lowerCAmelCase , return_tensors="""np""" ).input_features __lowerCAmelCase : str = feature_extractor(lowerCAmelCase , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ): self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. __lowerCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] __lowerCAmelCase : List[Any] = np.asarray(lowerCAmelCase ) __lowerCAmelCase : Optional[Any] = feature_extractor(lowerCAmelCase , return_tensors="""np""" ).input_features __lowerCAmelCase : Tuple = feature_extractor(lowerCAmelCase , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ): self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) ) # Test truncation required __lowerCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )] __lowerCAmelCase : Tuple = [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs] __lowerCAmelCase : Dict = [x[: feature_extractor.n_samples] for x in speech_inputs] __lowerCAmelCase : List[str] = [np.asarray(lowerCAmelCase ) for speech_input in speech_inputs_truncated] __lowerCAmelCase : Dict = feature_extractor(lowerCAmelCase , return_tensors="""np""" ).input_features __lowerCAmelCase : Optional[int] = feature_extractor(lowerCAmelCase , return_tensors="""np""" ).input_features for enc_seq_a, enc_seq_a in zip(lowerCAmelCase , lowerCAmelCase ): self.assertTrue(np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: """simple docstring""" import torch __lowerCAmelCase : str = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : int = np.random.rand(1_00 , 32 ).astype(np.floataa ) __lowerCAmelCase : Any = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __lowerCAmelCase : List[str] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) __lowerCAmelCase : List[Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : Optional[Any] ) -> Dict: """simple docstring""" __lowerCAmelCase : Any = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech __lowerCAmelCase : int = ds.sort("""id""" ).select(range(lowerCAmelCase ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def SCREAMING_SNAKE_CASE ( self : Dict ) -> str: """simple docstring""" __lowerCAmelCase : Optional[Any] = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) # fmt: on __lowerCAmelCase : Dict = self._load_datasamples(1 ) __lowerCAmelCase : Dict = WhisperFeatureExtractor() __lowerCAmelCase : List[str] = feature_extractor(lowerCAmelCase , return_tensors="""pt""" ).input_features self.assertEqual(input_features.shape , (1, 80, 30_00) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCAmelCase , atol=1e-4 ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __lowerCAmelCase : Union[str, Any] = 
self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __lowerCAmelCase : List[str] = self._load_datasamples(1 )[0] __lowerCAmelCase : Any = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue __lowerCAmelCase : str = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCAmelCase )[0] self.assertTrue(np.all(np.mean(lowerCAmelCase ) < 1e-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase ) - 1 ) < 1e-3 ) )
139
import argparse
import json

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification


def get_swin_config(swin_name):
    """Build a ``SwinConfig`` matching a timm checkpoint name.

    The name encodes the variant, e.g. ``swin_tiny_patch4_window7_224``:
    piece 1 is the model size, piece 3 ends with the window size, piece 4
    is the image size.
    """
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        # "large" checkpoints
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        # ImageNet-22k checkpoints ship no id2label mapping.
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        # BUG FIX: keys must be cast with int(k); the original cast the model name instead.
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    """Map a timm Swin state-dict key to the HF Transformers naming scheme."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    """Convert a timm Swin state dict in place, splitting fused qkv tensors.

    :param orig_state_dict: state dict produced by ``timm_model.state_dict()``
    :param model: target ``SwinForImageClassification`` (used to read head sizes)
    :return: the converted state dict, loadable into ``model``
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            # Relative-position masks are recomputed on the HF side.
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # timm stores query/key/value fused along dim 0; split into thirds.
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Convert a timm Swin checkpoint, verify logits agree, and save the HF model."""
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    # Sanity check: both implementations must produce (nearly) identical logits.
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
139
1
from __future__ import annotations def A (__A : list[int] , __A : list[int] , __A : int ) -> str: """simple docstring""" UpperCAmelCase_ = list(range(len(UpperCamelCase__ ) ) ) UpperCAmelCase_ = [v / w for v, w in zip(UpperCamelCase__ , UpperCamelCase__ )] index.sort(key=lambda __A : ratio[i] , reverse=UpperCamelCase__ ) UpperCAmelCase_ = 0 UpperCAmelCase_ = [0] * len(UpperCamelCase__ ) for i in index: if weight[i] <= capacity: UpperCAmelCase_ = 1 max_value += value[i] capacity -= weight[i] else: UpperCAmelCase_ = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
51
"""SentencePiece-based tokenizer for XLM-RoBERTa with fairseq-compatible ids."""

import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}


class _UpperCAmelCase(PreTrainedTokenizer):
    """XLM-RoBERTa tokenizer backed by SentencePiece (BPE).

    SentencePiece ids are shifted by ``fairseq_offset`` so that the special
    tokens line up with the original fairseq vocabulary.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # SentencePieceProcessor objects are not picklable: serialize the proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Add `<s> A </s>` (or `<s> A </s></s> B </s>` for pairs) special tokens."""
        if token_ids_a_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_a + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )

        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """XLM-RoBERTa does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        # BUG FIX: the piece marker (SPIECE_UNDERLINE), not the token list, is replaced by a space.
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
263
0
"""simple docstring""" import random from .binary_exp_mod import bin_exp_mod def __snake_case ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict=1_000 ) -> Tuple: '''simple docstring''' if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd _UpperCAmelCase : int = n - 1 _UpperCAmelCase : Union[str, Any] = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) _UpperCAmelCase : List[str] = 0 while count < prec: _UpperCAmelCase : Dict = random.randint(2 , n - 1 ) _UpperCAmelCase : int = bin_exp_mod(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if b != 1: _UpperCAmelCase : Optional[int] = True for _ in range(__lowerCamelCase ): if b == n - 1: _UpperCAmelCase : Any = False break _UpperCAmelCase : List[str] = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = abs(int(input("Enter bound : ").strip())) print("Here\'s the list of primes:") print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
351
"""simple docstring""" from __future__ import annotations from typing import Any def __snake_case ( SCREAMING_SNAKE_CASE__ : list[Any] ) -> None: '''simple docstring''' create_state_space_tree(SCREAMING_SNAKE_CASE__ , [] , 0 ) def __snake_case ( SCREAMING_SNAKE_CASE__ : list[Any] , SCREAMING_SNAKE_CASE__ : list[Any] , SCREAMING_SNAKE_CASE__ : int ) -> None: '''simple docstring''' if index == len(SCREAMING_SNAKE_CASE__ ): print(SCREAMING_SNAKE_CASE__ ) return create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 ) current_subsequence.append(sequence[index] ) create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 ) current_subsequence.pop() if __name__ == "__main__": _lowerCAmelCase : list[Any] = [3, 1, 2, 4] generate_all_subsequences(seq) seq.clear() seq.extend(["A", "B", "C"]) generate_all_subsequences(seq)
202
0
"""BERT model configuration and its ONNX export configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}


class BertConfig(PretrainedConfig):
    """Configuration for a BERT model.

    Instantiating with the defaults yields a configuration similar to
    ``bert-base-uncased``.
    """

    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    """ONNX export configuration: declares BERT's dynamic input axes."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
100
"""Tests for the Kandinsky 2.2 image-to-image pipeline."""

import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyVaaImgaImgPipeline,
    KandinskyVaaPriorPipeline,
    UNetaDConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyVaaImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests built on tiny randomly-initialized components."""

    pipeline_class = KandinskyVaaImgaImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        # First UNet block width; kept equal to the time-embedding input dim.
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        return UNetaDConditionModel(**model_kwargs)

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        return VQModel(**self.dummy_movq_kwargs)

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        return {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        # The tuple output path must match the dataclass output path.
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyVaaImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against published checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaImgaImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
100
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase_ : Dict = { """configuration_pix2struct""": [ """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Pix2StructConfig""", """Pix2StructTextConfig""", """Pix2StructVisionConfig""", ], """processing_pix2struct""": ["""Pix2StructProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : List[Any] = ["""Pix2StructImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : List[str] = [ """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Pix2StructPreTrainedModel""", """Pix2StructForConditionalGeneration""", """Pix2StructVisionModel""", """Pix2StructTextModel""", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys lowerCamelCase_ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
357
"""simple docstring""" import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class __A ( _SCREAMING_SNAKE_CASE, unittest.TestCase ): """simple docstring""" __lowerCAmelCase = RoFormerTokenizer __lowerCAmelCase = RoFormerTokenizerFast __lowerCAmelCase = True __lowerCAmelCase = True def SCREAMING_SNAKE_CASE ( self ) -> List[str]: super().setUp() def SCREAMING_SNAKE_CASE ( self , **__A ) -> Optional[int]: return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__A ) def SCREAMING_SNAKE_CASE ( self , **__A ) -> List[Any]: return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **__A ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: a ='''永和服装饰品有限公司,今天天气非常好''' a ='''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好''' return input_text, output_text def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: a =self.get_tokenizer() a , a =self.get_chinese_input_output_texts() a =tokenizer.tokenize(__A ) self.assertListEqual(__A , output_text.split() ) a =tokens + [tokenizer.unk_token] a =[2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: a =self.get_rust_tokenizer() a , a =self.get_chinese_input_output_texts() a =tokenizer.tokenize(__A ) self.assertListEqual(__A , output_text.split() ) a =tokens + [tokenizer.unk_token] a =[2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A ) def SCREAMING_SNAKE_CASE ( self ) -> Tuple: pass def SCREAMING_SNAKE_CASE ( self ) -> Tuple: pass def SCREAMING_SNAKE_CASE ( self ) -> int: pass
215
0
from __future__ import annotations from math import gcd def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int = 2 , _lowerCamelCase : int = 1 , _lowerCamelCase : int = 3 , ): # A value less than 2 can cause an infinite loop in the algorithm. if num < 2: raise ValueError("The input value cannot be less than 2") # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(_lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int) -> int: return (pow(_lowerCamelCase , 2) + step) % modulus for _ in range(_lowerCamelCase): # These track the position within the cycle detection logic. lowercase__ : Optional[int] = seed lowercase__ : int = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. 
lowercase__ : List[Any] = rand_fn(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) lowercase__ : List[str] = rand_fn(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) lowercase__ : int = rand_fn(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. lowercase__ : Any = gcd(hare - tortoise , _lowerCamelCase) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. lowercase__ : Dict = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''num''', type=int, help='''The value to find a divisor of''', ) parser.add_argument( '''--attempts''', type=int, default=3, help='''The number of attempts before giving up''', ) UpperCamelCase = parser.parse_args() UpperCamelCase = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(f"{args.num} is probably prime") else: UpperCamelCase = args.num // divisor print(f"{args.num} = {divisor} * {quotient}")
87
'''simple docstring''' def __magic_name__( lowerCamelCase): __lowerCAmelCase = set() # To detect a back edge, keep track of vertices currently in the recursion stack __lowerCAmelCase = set() return any( node not in visited and depth_first_search(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) for node in graph) def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): visited.add(lowerCamelCase) rec_stk.add(lowerCamelCase) for node in graph[vertex]: if node not in visited: if depth_first_search(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(lowerCamelCase) return False if __name__ == "__main__": from doctest import testmod testmod()
174
0
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class lowerCamelCase__ ( unittest.TestCase ): '''simple docstring''' lowerCamelCase = ViTImageProcessor if is_vision_available() else None @property def _lowerCAmelCase ( self ) -> List[str]: return self.image_processor_tester.prepare_image_processor_dict() def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =(3, 32, 1_28) _lowerCAmelCase =tempfile.mkdtemp() # fmt: off _lowerCAmelCase =["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on _lowerCAmelCase =dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) _lowerCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(__UpperCAmelCase ) + """\n""" ) _lowerCAmelCase ={ """do_normalize""": False, """do_resize""": True, """image_processor_type""": """ViTImageProcessor""", """resample""": 3, """size""": {"""height""": 32, """width""": 1_28}, } _lowerCAmelCase =os.path.join(self.tmpdirname , __UpperCAmelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(__UpperCAmelCase , 
__UpperCAmelCase ) def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> List[Any]: return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def _lowerCAmelCase ( self , **__UpperCAmelCase ) -> Tuple: return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Any: shutil.rmtree(self.tmpdirname ) def _lowerCAmelCase ( self ) -> Union[str, Any]: _lowerCAmelCase =np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta ) _lowerCAmelCase =Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) return image_input def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =self.get_tokenizer() _lowerCAmelCase =self.get_image_processor() _lowerCAmelCase =MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase =MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Optional[int]: _lowerCAmelCase =self.get_tokenizer() _lowerCAmelCase =self.get_image_processor() _lowerCAmelCase =MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase =self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _lowerCAmelCase =self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 ) _lowerCAmelCase =MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) 
self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.get_image_processor() _lowerCAmelCase =self.get_tokenizer() _lowerCAmelCase =MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) _lowerCAmelCase =self.prepare_image_inputs() _lowerCAmelCase =image_processor(__UpperCAmelCase , return_tensors="""np""" ) _lowerCAmelCase =processor(images=__UpperCAmelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _lowerCAmelCase ( self ) -> Optional[Any]: _lowerCAmelCase =self.get_image_processor() _lowerCAmelCase =self.get_tokenizer() _lowerCAmelCase =MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) _lowerCAmelCase ="""test""" _lowerCAmelCase =processor(text=__UpperCAmelCase ) _lowerCAmelCase =tokenizer(__UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowerCAmelCase ( self ) -> List[str]: _lowerCAmelCase =self.get_image_processor() _lowerCAmelCase =self.get_tokenizer() _lowerCAmelCase =MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) _lowerCAmelCase ="""test""" _lowerCAmelCase =self.prepare_image_inputs() _lowerCAmelCase =processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] ) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase ): processor() def _lowerCAmelCase ( self ) -> Dict: _lowerCAmelCase =self.get_image_processor() _lowerCAmelCase =self.get_tokenizer() _lowerCAmelCase =MgpstrProcessor(tokenizer=__UpperCAmelCase , 
image_processor=__UpperCAmelCase ) _lowerCAmelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] _lowerCAmelCase =processor.char_decode(__UpperCAmelCase ) _lowerCAmelCase =tokenizer.batch_decode(__UpperCAmelCase ) _lowerCAmelCase =[seq.replace(""" """ , """""" ) for seq in decoded_tok] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def _lowerCAmelCase ( self ) -> Tuple: _lowerCAmelCase =self.get_image_processor() _lowerCAmelCase =self.get_tokenizer() _lowerCAmelCase =MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) _lowerCAmelCase =None _lowerCAmelCase =self.prepare_image_inputs() _lowerCAmelCase =processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def _lowerCAmelCase ( self ) -> Any: _lowerCAmelCase =self.get_image_processor() _lowerCAmelCase =self.get_tokenizer() _lowerCAmelCase =MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) _lowerCAmelCase =torch.randn(1 , 27 , 38 ) _lowerCAmelCase =torch.randn(1 , 27 , 5_02_57 ) _lowerCAmelCase =torch.randn(1 , 27 , 3_05_22 ) _lowerCAmelCase =processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
341
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( __magic_name__ ): '''simple docstring''' lowerCamelCase = ['''image_processor''', '''tokenizer'''] lowerCamelCase = '''CLIPImageProcessor''' lowerCamelCase = ('''XLMRobertaTokenizer''', '''XLMRobertaTokenizerFast''') def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Union[str, Any]: _lowerCAmelCase =None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , __UpperCAmelCase , ) _lowerCAmelCase =kwargs.pop("""feature_extractor""" ) _lowerCAmelCase =image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]: if text is None and images is None: raise ValueError("""You have to specify either text or images. 
Both cannot be none.""" ) if text is not None: _lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if images is not None: _lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) if text is not None and images is not None: _lowerCAmelCase =image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]: return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase ) def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]: return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase ) @property def _lowerCAmelCase ( self ) -> int: _lowerCAmelCase =self.tokenizer.model_input_names _lowerCAmelCase =self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
341
1
from __future__ import annotations def __lowercase ( __lowerCAmelCase : List[Any] ): a__ = len(SCREAMING_SNAKE_CASE__ ) // 2 # choose the middle 3 elements a__ = lst[m - 1 : m + 2] # if middle element is peak if three[1] > three[0] and three[1] > three[2]: return three[1] # if increasing, recurse on right elif three[0] < three[2]: if len(lst[:m] ) == 2: m -= 1 return peak(lst[m:] ) # decreasing else: if len(lst[:m] ) == 2: m += 1 return peak(lst[:m] ) if __name__ == "__main__": import doctest doctest.testmod()
240
'''simple docstring''' from collections import defaultdict from math import ceil, sqrt def snake_case_ ( SCREAMING_SNAKE_CASE__ = 100_0000 , SCREAMING_SNAKE_CASE__ = 10 ): """simple docstring""" _SCREAMING_SNAKE_CASE : defaultdict = defaultdict(SCREAMING_SNAKE_CASE__ ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: _SCREAMING_SNAKE_CASE : int = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: _SCREAMING_SNAKE_CASE : List[str] = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(SCREAMING_SNAKE_CASE__ , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(F"{solution() = }")
200
0
"""simple docstring""" from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , __lowerCAmelCase , ) class _lowerCamelCase ( __lowerCAmelCase ): """simple docstring""" snake_case = RobertaConfig snake_case = '''roberta''' def __init__( self , _SCREAMING_SNAKE_CASE )->Union[str, Any]: '''simple docstring''' super().__init__(lowerCAmelCase_ ) A_ : List[Any] = RobertaEmbeddings(lowerCAmelCase_ ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. 
" , __lowerCAmelCase , ) class _lowerCamelCase ( __lowerCAmelCase ): """simple docstring""" snake_case = RobertaConfig snake_case = '''roberta''' def __init__( self , _SCREAMING_SNAKE_CASE )->int: '''simple docstring''' super().__init__(lowerCAmelCase_ ) A_ : Any = config.num_labels A_ : str = config.num_hidden_layers A_ : List[Any] = DeeRobertaModel(lowerCAmelCase_ ) A_ : List[str] = nn.Dropout(config.hidden_dropout_prob ) A_ : Dict = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=False , )->Dict: '''simple docstring''' A_ : Union[str, Any] = self.num_layers try: A_ : List[str] = self.roberta( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_ , ) A_ : Optional[int] = outputs[1] A_ : Dict = self.dropout(lowerCAmelCase_ ) A_ : List[Any] = self.classifier(lowerCAmelCase_ ) A_ : int = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: A_ : str = e.message A_ : List[str] = e.exit_layer A_ : Optional[int] = outputs[0] if not self.training: A_ : List[Any] = entropy(lowerCAmelCase_ ) A_ : str = [] A_ : Union[str, Any] = [] if labels is not None: if self.num_labels == 1: # We are doing regression A_ : str = MSELoss() A_ : List[str] = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: A_ : str = CrossEntropyLoss() A_ : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits A_ : List[str] = [] for highway_exit in outputs[-1]: A_ : Any = highway_exit[0] if not self.training: 
highway_logits_all.append(lowerCAmelCase_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression A_ : int = MSELoss() A_ : Optional[int] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: A_ : Dict = CrossEntropyLoss() A_ : Optional[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(lowerCAmelCase_ ) if train_highway: A_ : Dict = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: A_ : List[Any] = (loss,) + outputs if not self.training: A_ : Optional[int] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: A_ : Any = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
364
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class _lowerCamelCase ( unittest.TestCase ): """simple docstring""" def _snake_case ( self )->Any: '''simple docstring''' A_ : Dict = '''hf-internal-testing/tiny-random-t5''' A_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ : Union[str, Any] = tokenizer('''This is me''' , return_tensors='''pt''' ) A_ : Tuple = model.to_bettertransformer() self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) A_ : Dict = model.generate(**_SCREAMING_SNAKE_CASE ) A_ : Union[str, Any] = model.reverse_bettertransformer() self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_SCREAMING_SNAKE_CASE ) A_ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertFalse( any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) A_ : str = model_reloaded.generate(**_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def _snake_case ( self )->Optional[Any]: '''simple docstring''' A_ : List[str] = '''hf-internal-testing/tiny-random-t5''' A_ : Dict = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE ) A_ : List[Any] = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(_SCREAMING_SNAKE_CASE ): model.save_pretrained(_SCREAMING_SNAKE_CASE ) A_ : List[str] = model.reverse_bettertransformer() model.save_pretrained(_SCREAMING_SNAKE_CASE )
65
0
'''simple docstring''' _lowerCAmelCase = 0 # The first color of the flag. _lowerCAmelCase = 1 # The second color of the flag. _lowerCAmelCase = 2 # The third color of the flag. _lowerCAmelCase = (red, white, blue) def __lowerCAmelCase ( snake_case__ ): if not sequence: return [] if len(snake_case__ ) == 1: return list(snake_case__ ) __UpperCamelCase : List[str] = 0 __UpperCamelCase : Optional[int] = len(snake_case__ ) - 1 __UpperCamelCase : Dict = 0 while mid <= high: if sequence[mid] == colors[0]: __UpperCamelCase , __UpperCamelCase : List[Any] = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: __UpperCamelCase , __UpperCamelCase : Dict = sequence[high], sequence[mid] high -= 1 else: __UpperCamelCase : Optional[Any] = F"The elements inside the sequence must contains only {colors} values" raise ValueError(snake_case__ ) return sequence if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase = input('''Enter numbers separated by commas:\n''').strip() _lowerCAmelCase = [int(item.strip()) for item in user_input.split(''',''')] print(f'{dutch_national_flag_sort(unsorted)}')
298
'''simple docstring''' import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class A ( unittest.TestCase ): '''simple docstring''' def __init__(self , _UpperCAmelCase , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 3_2 , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 2_5_5 , _UpperCAmelCase = True , _UpperCAmelCase = True , _UpperCAmelCase = [0.48_145_466, 0.4_578_275, 0.40_821_073] , _UpperCAmelCase = [0.26_862_954, 0.26_130_258, 0.27_577_711] , _UpperCAmelCase = True , _UpperCAmelCase=7 , _UpperCAmelCase=3_0 , _UpperCAmelCase=4_0_0 , _UpperCAmelCase=3 , ) -> Dict: __UpperCamelCase : Dict = parent __UpperCamelCase : Any = do_resize __UpperCamelCase : Union[str, Any] = size if size is not None else {"shortest_edge": 2_8_8} __UpperCamelCase : Any = size_divisor __UpperCamelCase : Optional[int] = do_rescale __UpperCamelCase : Union[str, Any] = rescale_factor __UpperCamelCase : int = do_normalize __UpperCamelCase : List[Any] = do_center_crop __UpperCamelCase : Optional[int] = image_mean __UpperCamelCase : Tuple = image_std __UpperCamelCase : Tuple = do_pad __UpperCamelCase : Tuple = batch_size __UpperCamelCase : Dict = num_channels __UpperCamelCase : Dict = min_resolution __UpperCamelCase : Optional[Any] = max_resolution def a_ (self ) -> Optional[int]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def a_ (self , _UpperCAmelCase , _UpperCAmelCase=False ) -> Optional[Any]: if not batched: __UpperCamelCase : 
List[str] = self.size["shortest_edge"] __UpperCamelCase : Optional[int] = image_inputs[0] if isinstance(_UpperCAmelCase , Image.Image ): __UpperCamelCase , __UpperCamelCase : Optional[Any] = image.size else: __UpperCamelCase , __UpperCamelCase : Union[str, Any] = image.shape[1], image.shape[2] __UpperCamelCase : Dict = size / min(_UpperCAmelCase , _UpperCAmelCase ) if h < w: __UpperCamelCase , __UpperCamelCase : Tuple = size, scale * w else: __UpperCamelCase , __UpperCamelCase : List[Any] = scale * h, size __UpperCamelCase : List[Any] = int((1_3_3_3 / 8_0_0) * size ) if max(_UpperCAmelCase , _UpperCAmelCase ) > max_size: __UpperCamelCase : str = max_size / max(_UpperCAmelCase , _UpperCAmelCase ) __UpperCamelCase : Dict = newh * scale __UpperCamelCase : Union[str, Any] = neww * scale __UpperCamelCase , __UpperCamelCase : Optional[int] = int(newh + 0.5 ), int(neww + 0.5 ) __UpperCamelCase , __UpperCamelCase : Optional[int] = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __UpperCamelCase : int = [] for image in image_inputs: __UpperCamelCase , __UpperCamelCase : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __UpperCamelCase : Tuple = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0] __UpperCamelCase : Union[str, Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A = BridgeTowerImageProcessor if is_vision_available() else None def a_ (self ) -> Dict: __UpperCamelCase : Optional[Any] = BridgeTowerImageProcessingTester(self ) @property def a_ (self ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def a_ (self ) -> Union[str, Any]: __UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) 
self.assertTrue(hasattr(_UpperCAmelCase , "image_mean" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "image_std" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "do_normalize" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "do_resize" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "size" ) ) self.assertTrue(hasattr(_UpperCAmelCase , "size_divisor" ) ) def a_ (self ) -> List[str]: pass def a_ (self ) -> List[Any]: # Initialize image processor __UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input __UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase : Optional[int] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : List[str] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def a_ (self ) -> Tuple: # Initialize image processor __UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCamelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray ) # Test not batched input __UpperCamelCase : Optional[int] = 
image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase : List[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def a_ (self ) -> int: # Initialize image processor __UpperCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test not batched input __UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : int = self.image_processor_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase : Optional[Any] = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values __UpperCamelCase , __UpperCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
298
1
"""In-place bitonic sort (Batcher's network).  Works only when the slice
length being sorted is a power of two."""
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare ``array[index1]`` and ``array[index2]`` and swap them in place
    if they violate *direction* (1 = ascending, 0 = descending).

    The obfuscated original collapsed both indices into one name, which made
    the comparison always false and the swap a self-assignment; restored here.
    """
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic sequence ``array[low:low+length]`` so it
    becomes fully sorted in *direction* (in place)."""
    if length > 1:
        middle = int(length / 2)
        # First pass pairs element i with its partner i + middle.
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort ``array[low:low+length]`` in place in *direction*.

    ``length`` must be a power of two: the first half is sorted ascending,
    the second half descending, and the resulting bitonic sequence is merged.
    """
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('\nSorted array in ascending order is: ', end='')
    print(*unsorted, sep=', ')

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('Sorted array in descending order is: ', end='')
    print(*unsorted, sep=', ')
109
"""In-place bitonic sort (Batcher's network).  Works only when the slice
length being sorted is a power of two."""
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare ``array[index1]`` and ``array[index2]`` and swap them in place
    if they violate *direction* (1 = ascending, 0 = descending).

    The obfuscated original collapsed both indices into one name, which made
    the comparison always false and the swap a self-assignment; restored here.
    """
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge the bitonic sequence ``array[low:low+length]`` so it
    becomes fully sorted in *direction* (in place)."""
    if length > 1:
        middle = int(length / 2)
        # First pass pairs element i with its partner i + middle.
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort ``array[low:low+length]`` in place in *direction*.

    ``length`` must be a power of two: the first half is sorted ascending,
    the second half descending, and the resulting bitonic sequence is merged.
    """
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('\nSorted array in ascending order is: ', end='')
    print(*unsorted, sep=', ')

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('Sorted array in descending order is: ', end='')
    print(*unsorted, sep=', ')
109
1
"""simple docstring""" def snake_case ( A__ ,A__ ): return int((input_a, input_a).count(0 ) == 0 ) def snake_case ( ): assert and_gate(0 ,0 ) == 0 assert and_gate(0 ,1 ) == 0 assert and_gate(1 ,0 ) == 0 assert and_gate(1 ,1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
268
"""simple docstring""" from __future__ import annotations import time lowerCamelCase_ = list[tuple[int, int]] lowerCamelCase_ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class UpperCamelCase_ : def __init__( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Node | None ) -> Dict: UpperCAmelCase_ : Any = pos_x UpperCAmelCase_ : str = pos_y UpperCAmelCase_ : int = (pos_y, pos_x) UpperCAmelCase_ : int = goal_x UpperCAmelCase_ : Tuple = goal_y UpperCAmelCase_ : Union[str, Any] = parent class UpperCamelCase_ : def __init__( self : List[Any] , lowerCAmelCase_ : tuple[int, int] , lowerCAmelCase_ : tuple[int, int] ) -> Tuple: UpperCAmelCase_ : List[str] = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCAmelCase_ ) UpperCAmelCase_ : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = [self.start] UpperCAmelCase_ : int = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Path | None: while self.node_queue: UpperCAmelCase_ : str = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: UpperCAmelCase_ : Optional[Any] = True return self.retrace_path(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_successors(lowerCAmelCase_ ) for node in successors: self.node_queue.append(lowerCAmelCase_ ) if not self.reached: return [self.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Node ) -> list[Node]: UpperCAmelCase_ : List[str] = [] for action in delta: UpperCAmelCase_ : List[Any] = parent.pos_x + action[1] UpperCAmelCase_ : List[str] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 
1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , lowerCAmelCase_ ) ) return successors def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node | None ) -> Path: UpperCAmelCase_ : Union[str, Any] = node UpperCAmelCase_ : Union[str, Any] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase_ : Tuple = current_node.parent path.reverse() return path class UpperCamelCase_ : def __init__( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = False def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Path | None: while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: UpperCAmelCase_ : int = self.fwd_bfs.node_queue.pop(0 ) UpperCAmelCase_ : Dict = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: UpperCAmelCase_ : str = True return self.retrace_bidirectional_path( lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = current_bwd_node UpperCAmelCase_ : List[str] = current_fwd_node UpperCAmelCase_ : Tuple = { self.fwd_bfs: self.fwd_bfs.get_successors(lowerCAmelCase_ ), self.bwd_bfs: self.bwd_bfs.get_successors(lowerCAmelCase_ ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(lowerCAmelCase_ ) if not self.reached: return [self.fwd_bfs.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> Path: UpperCAmelCase_ : Optional[Any] = self.fwd_bfs.retrace_path(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.bwd_bfs.retrace_path(lowerCAmelCase_ ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase_ : str = fwd_path + bwd_path return path if 
__name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() lowerCamelCase_ = (0, 0) lowerCamelCase_ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) lowerCamelCase_ = time.time() lowerCamelCase_ = BreadthFirstSearch(init, goal) lowerCamelCase_ = bfs.search() lowerCamelCase_ = time.time() - start_bfs_time print('''Unidirectional BFS computation time : ''', bfs_time) lowerCamelCase_ = time.time() lowerCamelCase_ = BidirectionalBreadthFirstSearch(init, goal) lowerCamelCase_ = bd_bfs.search() lowerCamelCase_ = time.time() - start_bd_bfs_time print('''Bidirectional BFS computation time : ''', bd_bfs_time)
268
1
"""Max-pooling and average-pooling over a square matrix.

The obfuscated original gave every parameter of each function the same
mangled name (a SyntaxError) and mangled the loop counters that the bodies
reference as ``i``/``j``/``mat_i``/``mat_j``; the real names — which the
``__main__`` demo already calls (``maxpooling``/``avgpooling``) — are
restored here.
"""
import numpy as np


def maxpooling(arr: list, size: int, stride: int) -> np.ndarray:
    """Return the max-pool of square matrix ``arr`` with a ``size``x``size``
    window moved by ``stride``.

    Raises ValueError when ``arr`` is not square.

    >>> maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
    array([[ 6.,  8.],
           [14., 16.]])
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: list, size: int, stride: int) -> np.ndarray:
    """Return the average-pool (truncated to int per cell) of square matrix
    ``arr`` with a ``size``x``size`` window moved by ``stride``.

    Raises ValueError when ``arr`` is not square.

    >>> avgpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
    array([[ 3.,  5.],
           [11., 13.]])
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name='avgpooling', verbose=True)

    # Pillow is only needed for this demo, so it is imported lazily here
    # rather than at module top — the pooling functions stay usable without it.
    from PIL import Image

    # Loading the image
    image = Image.open('path_to_image')

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
135
"""Speed of sound in a fluid: c = sqrt(K / rho), with K the bulk modulus
and rho the density.

The obfuscated original declared the function as ``def a(lowerCamelCase__,
lowerCamelCase__)`` — duplicate parameter names, a SyntaxError — so the
canonical name and parameters are restored here.
"""


def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound (m/s) in a fluid of the given density
    (kg/m^3) and bulk modulus (Pa).

    Raises ValueError for a non-positive density or bulk modulus.

    >>> speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)
    1467.7563207952705
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
135
1