code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = '''▁''' __lowerCAmelCase = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', '''tokenizer_config_file''': '''tokenizer_config.json''', } __lowerCAmelCase = { '''vocab_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''', }, '''spm_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_config_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''', }, } __lowerCAmelCase = { '''facebook/m2m100_418M''': 1_024, } # fmt: off __lowerCAmelCase = { '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', 
'''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''], '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de'''] } class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : Union[str, Any] = ['input_ids', 'attention_mask'] lowerCAmelCase : List[int] = [] lowerCAmelCase : List[int] = [] def __init__( self : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : Union[str, Any]="<s>" ,_UpperCAmelCase : Union[str, Any]="</s>" ,_UpperCAmelCase : int="</s>" ,_UpperCAmelCase : Tuple="<pad>" ,_UpperCAmelCase : str="<unk>" ,_UpperCAmelCase : Tuple="m2m100" ,_UpperCAmelCase : Optional[Dict[str, Any]] = None ,_UpperCAmelCase : List[Any]=8 ,**_UpperCAmelCase : Union[str, Any] ,): _a : int = {} if sp_model_kwargs is None else sp_model_kwargs _a : int = language_codes _a : List[Any] = FAIRSEQ_LANGUAGE_CODES[language_codes] _a : Union[str, Any] = {lang_code: F"""__{lang_code}__""" for lang_code in fairseq_language_code} _a : List[str] = kwargs.get('additional_special_tokens' ,[] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(_UpperCAmelCase ) for lang_code in fairseq_language_code if self.get_lang_token(_UpperCAmelCase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_UpperCAmelCase ,tgt_lang=_UpperCAmelCase ,bos_token=_UpperCAmelCase ,eos_token=_UpperCAmelCase ,sep_token=_UpperCAmelCase 
,unk_token=_UpperCAmelCase ,pad_token=_UpperCAmelCase ,language_codes=_UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,num_madeup_words=_UpperCAmelCase ,**_UpperCAmelCase ,) _a : Any = vocab_file _a : int = load_json(_UpperCAmelCase ) _a : Dict = {v: k for k, v in self.encoder.items()} _a : int = spm_file _a : int = load_spm(_UpperCAmelCase ,self.sp_model_kwargs ) _a : Any = len(self.encoder ) _a : str = { self.get_lang_token(_UpperCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(_UpperCAmelCase ) } _a : Optional[int] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(_UpperCAmelCase )} _a : Dict = {v: k for k, v in self.lang_token_to_id.items()} _a : Union[str, Any] = src_lang if src_lang is not None else 'en' _a : Optional[int] = tgt_lang _a : Optional[Any] = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) _a : List[Any] = num_madeup_words @property def __lowercase ( self : int ): return len(self.encoder ) + len(self.lang_token_to_id ) @property def __lowercase ( self : Optional[int] ): return self._src_lang @src_lang.setter def __lowercase ( self : str ,_UpperCAmelCase : str ): _a : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : str ): return self.sp_model.encode(_UpperCAmelCase ,out_type=_UpperCAmelCase ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : int ): if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(_UpperCAmelCase ,self.encoder[self.unk_token] ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : int ): if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(_UpperCAmelCase ,self.unk_token ) def __lowercase ( self : int ,_UpperCAmelCase : Dict ): _a : Dict = [] _a : List[str] = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in 
self.all_special_tokens: out_string += self.sp_model.decode(_UpperCAmelCase ) + token _a : Optional[Any] = [] else: current_sub_tokens.append(_UpperCAmelCase ) out_string += self.sp_model.decode(_UpperCAmelCase ) return out_string.strip() def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ,_UpperCAmelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase ,token_ids_a=_UpperCAmelCase ,already_has_special_tokens=_UpperCAmelCase ) _a : List[str] = [1] * len(self.prefix_tokens ) _a : Any = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones def __lowercase ( self : Optional[int] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __lowercase ( self : List[Any] ): _a : Dict = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): _a : Dict = self.__dict__.copy() _a : Any = None return state def __setstate__( self : Union[str, Any] ,_UpperCAmelCase : Dict ): _a : List[Any] = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): _a : Union[str, Any] = {} _a : Any = load_spm(self.spm_file ,self.sp_model_kwargs ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ): _a : List[str] = Path(_UpperCAmelCase ) if not save_dir.is_dir(): raise OSError(F"""{save_directory} should be a directory""" ) _a : int = save_dir / ( (filename_prefix 
+ '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'] ) _a : Tuple = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'] ) save_json(self.encoder ,_UpperCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file ,_UpperCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(_UpperCAmelCase ,'wb' ) as fi: _a : Tuple = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (str(_UpperCAmelCase ), str(_UpperCAmelCase )) def __lowercase ( self : List[str] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : str = "en" ,_UpperCAmelCase : Optional[List[str]] = None ,_UpperCAmelCase : str = "ro" ,**_UpperCAmelCase : List[Any] ,): _a : int = src_lang _a : List[str] = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase ) def __lowercase ( self : Dict ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[str] ,_UpperCAmelCase : Optional[str] ,**_UpperCAmelCase : List[Any] ): if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _a : List[str] = src_lang _a : Optional[int] = self(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,**_UpperCAmelCase ) _a : List[str] = self.get_lang_id(_UpperCAmelCase ) _a : List[str] = tgt_lang_id return inputs def __lowercase ( self : List[str] ): self.set_src_lang_special_tokens(self.src_lang ) def __lowercase ( self : Any ): self.set_tgt_lang_special_tokens(self.tgt_lang ) def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : str ): _a : Tuple = self.get_lang_token(_UpperCAmelCase ) _a : Optional[Any] = self.lang_token_to_id[lang_token] _a : Union[str, Any] = [self.cur_lang_id] _a : Union[str, Any] = [self.eos_token_id] def __lowercase ( self : List[str] ,_UpperCAmelCase : str ): _a : int = 
self.get_lang_token(_UpperCAmelCase ) _a : Optional[Any] = self.lang_token_to_id[lang_token] _a : Dict = [self.cur_lang_id] _a : Optional[int] = [self.eos_token_id] def __lowercase ( self : str ,_UpperCAmelCase : str ): return self.lang_code_to_token[lang] def __lowercase ( self : List[Any] ,_UpperCAmelCase : str ): _a : Optional[int] = self.get_lang_token(_UpperCAmelCase ) return self.lang_token_to_id[lang_token] def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> sentencepiece.SentencePieceProcessor: _a : int = sentencepiece.SentencePieceProcessor(**lowerCAmelCase_ ) spm.Load(str(lowerCAmelCase_ ) ) return spm def __lowerCamelCase ( lowerCAmelCase_ ) -> Union[Dict, List]: with open(lowerCAmelCase_ , 'r' ) as f: return json.load(lowerCAmelCase_ ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> None: with open(lowerCAmelCase_ , 'w' ) as f: json.dump(lowerCAmelCase_ , lowerCAmelCase_ , indent=2 )
89
'''simple docstring''' # Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase = { '''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''], '''tokenization_cpmant''': ['''CpmAntTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CpmAntForCausalLM''', '''CpmAntModel''', '''CpmAntPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
89
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : Tuple = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' 
) lowerCAmelCase : List[str] = 'CIDAS/clipseg-rd64-refined' lowerCAmelCase : Optional[int] = 'image_segmenter' lowerCAmelCase : Union[str, Any] = CLIPSegForImageSegmentation lowerCAmelCase : str = ['image', 'text'] lowerCAmelCase : int = ['image'] def __init__( self : Tuple ,*_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : Any ): requires_backends(self ,['vision'] ) super().__init__(*_UpperCAmelCase ,**_UpperCAmelCase ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : "Image" ,_UpperCAmelCase : str ): return self.pre_processor(text=[label] ,images=[image] ,padding=_UpperCAmelCase ,return_tensors='pt' ) def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Optional[Any] ): with torch.no_grad(): _a : int = self.model(**_UpperCAmelCase ).logits return logits def __lowercase ( self : List[Any] ,_UpperCAmelCase : Optional[int] ): _a : Dict = outputs.cpu().detach().numpy() _a : List[str] = 0 _a : int = 1 return Image.fromarray((array * 255).astype(np.uinta ) )
89
'''simple docstring''' import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __magic_name__ ( _UpperCamelCase , unittest.TestCase ): lowerCAmelCase : str = LayoutLMTokenizer lowerCAmelCase : Tuple = LayoutLMTokenizerFast lowerCAmelCase : List[Any] = True lowerCAmelCase : int = True def __lowercase ( self : Dict ): super().setUp() _a : int = [ '[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] _a : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __lowercase ( self : Dict ,**_UpperCAmelCase : List[str] ): return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**_UpperCAmelCase ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Tuple ): _a : Optional[int] = 'UNwant\u00E9d,running' _a : List[Any] = 'unwanted, running' return input_text, output_text def __lowercase ( self : Optional[int] ): _a : Optional[Any] = self.tokenizer_class(self.vocab_file ) _a : Optional[Any] = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(_UpperCAmelCase ,['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) ,[7, 4, 5, 10, 8, 9] ) def __lowercase ( self : Optional[int] ): pass
89
1
'''simple docstring''' import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) __lowerCAmelCase = { '''sample_size''': 32, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': 1_000, '''block_out_channels''': [32, 64], '''attention_head_dim''': 8, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } __lowerCAmelCase = { '''sample_size''': 64, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 3, '''num_class_embeds''': 1_000, '''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } __lowerCAmelCase = { '''sample_size''': 256, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': None, '''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''default''', '''upsample_type''': '''resnet''', '''downsample_type''': 
'''resnet''', } __lowerCAmelCase = { '''num_train_timesteps''': 40, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } __lowerCAmelCase = { '''num_train_timesteps''': 201, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } __lowerCAmelCase = { '''num_train_timesteps''': 151, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } def __lowerCamelCase ( lowerCAmelCase_ ) -> Dict: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> int: _a : Tuple = checkpoint[f"""{old_prefix}.in_layers.0.weight"""] _a : int = checkpoint[f"""{old_prefix}.in_layers.0.bias"""] _a : Tuple = checkpoint[f"""{old_prefix}.in_layers.2.weight"""] _a : List[str] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""] _a : Optional[int] = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""] _a : Optional[Any] = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""] _a : Optional[Any] = checkpoint[f"""{old_prefix}.out_layers.0.weight"""] _a : List[str] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""] _a : int = checkpoint[f"""{old_prefix}.out_layers.3.weight"""] _a : int = checkpoint[f"""{old_prefix}.out_layers.3.bias"""] if has_skip: _a : List[Any] = checkpoint[f"""{old_prefix}.skip_connection.weight"""] _a : Any = checkpoint[f"""{old_prefix}.skip_connection.bias"""] return new_checkpoint def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Optional[int]: _a , _a , _a : Optional[Any] = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 ) _a , _a , _a : Optional[Any] = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 ) _a : int = checkpoint[f"""{old_prefix}.norm.weight"""] _a : Dict = 
checkpoint[f"""{old_prefix}.norm.bias"""] _a : Union[str, Any] = weight_q.squeeze(-1 ).squeeze(-1 ) _a : int = bias_q.squeeze(-1 ).squeeze(-1 ) _a : Optional[Any] = weight_k.squeeze(-1 ).squeeze(-1 ) _a : int = bias_k.squeeze(-1 ).squeeze(-1 ) _a : Dict = weight_v.squeeze(-1 ).squeeze(-1 ) _a : List[str] = bias_v.squeeze(-1 ).squeeze(-1 ) _a : Union[str, Any] = ( checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 ) ) _a : int = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: _a : List[Any] = torch.load(lowerCAmelCase_ , map_location='cpu' ) _a : List[Any] = {} _a : List[str] = checkpoint['time_embed.0.weight'] _a : Union[str, Any] = checkpoint['time_embed.0.bias'] _a : Optional[Any] = checkpoint['time_embed.2.weight'] _a : str = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: _a : List[str] = checkpoint['label_emb.weight'] _a : Optional[Any] = checkpoint['input_blocks.0.0.weight'] _a : List[Any] = checkpoint['input_blocks.0.0.bias'] _a : Optional[int] = unet_config['down_block_types'] _a : Union[str, Any] = unet_config['layers_per_block'] _a : Tuple = unet_config['attention_head_dim'] _a : Dict = unet_config['block_out_channels'] _a : int = 1 _a : Dict = channels_list[0] for i, layer_type in enumerate(lowerCAmelCase_ ): _a : Optional[Any] = channels_list[i] _a : List[Any] = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(lowerCAmelCase_ ): _a : Tuple = f"""down_blocks.{i}.resnets.{j}""" _a : Any = f"""input_blocks.{current_layer}.0""" _a : str = True if j == 0 and downsample_block_has_skip else False _a : Tuple = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , has_skip=lowerCAmelCase_ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(lowerCAmelCase_ ): _a : Tuple = 
f"""down_blocks.{i}.resnets.{j}""" _a : List[str] = f"""input_blocks.{current_layer}.0""" _a : Optional[Any] = True if j == 0 and downsample_block_has_skip else False _a : Any = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , has_skip=lowerCAmelCase_ ) _a : List[str] = f"""down_blocks.{i}.attentions.{j}""" _a : Optional[int] = f"""input_blocks.{current_layer}.1""" _a : List[str] = convert_attention( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) current_layer += 1 if i != len(lowerCAmelCase_ ) - 1: _a : Optional[Any] = f"""down_blocks.{i}.downsamplers.0""" _a : Tuple = f"""input_blocks.{current_layer}.0""" _a : str = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) current_layer += 1 _a : List[str] = current_channels # hardcoded the mid-block for now _a : Optional[Any] = 'mid_block.resnets.0' _a : str = 'middle_block.0' _a : Optional[int] = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _a : Optional[Any] = 'mid_block.attentions.0' _a : Any = 'middle_block.1' _a : Optional[int] = convert_attention(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _a : int = 'mid_block.resnets.1' _a : Any = 'middle_block.2' _a : Union[str, Any] = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _a : Union[str, Any] = 0 _a : str = unet_config['up_block_types'] for i, layer_type in enumerate(lowerCAmelCase_ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): _a : int = f"""up_blocks.{i}.resnets.{j}""" _a : int = f"""output_blocks.{current_layer}.0""" _a : Union[str, Any] = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , has_skip=lowerCAmelCase_ ) current_layer += 1 if i != len(lowerCAmelCase_ ) - 1: _a : List[str] = f"""up_blocks.{i}.upsamplers.0""" _a : Tuple = 
f"""output_blocks.{current_layer-1}.1""" _a : str = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): _a : Any = f"""up_blocks.{i}.resnets.{j}""" _a : Optional[int] = f"""output_blocks.{current_layer}.0""" _a : Tuple = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , has_skip=lowerCAmelCase_ ) _a : Union[str, Any] = f"""up_blocks.{i}.attentions.{j}""" _a : str = f"""output_blocks.{current_layer}.1""" _a : Union[str, Any] = convert_attention( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) current_layer += 1 if i != len(lowerCAmelCase_ ) - 1: _a : Any = f"""up_blocks.{i}.upsamplers.0""" _a : int = f"""output_blocks.{current_layer-1}.2""" _a : List[Any] = convert_resnet(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _a : List[str] = checkpoint['out.0.weight'] _a : int = checkpoint['out.0.bias'] _a : Tuple = checkpoint['out.2.weight'] _a : Tuple = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''') parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.''' ) parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''') __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = strabool(args.class_cond) __lowerCAmelCase = os.path.basename(args.unet_path) print(f"""Checkpoint: {ckpt_name}""") # Get U-Net config if "imagenet64" in ckpt_name: __lowerCAmelCase = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __lowerCAmelCase = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: __lowerCAmelCase = 
TEST_UNET_CONFIG else: raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""") if not args.class_cond: __lowerCAmelCase = None __lowerCAmelCase = con_pt_to_diffuser(args.unet_path, unet_config) __lowerCAmelCase = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: __lowerCAmelCase = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: __lowerCAmelCase = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __lowerCAmelCase = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""") __lowerCAmelCase = CMStochasticIterativeScheduler(**scheduler_config) __lowerCAmelCase = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
89
'''simple docstring''' import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { '''microsoft/conditional-detr-resnet-50''': ( '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json''' ), } class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : Any = 'conditional_detr' lowerCAmelCase : List[str] = ['past_key_values'] lowerCAmelCase : Optional[int] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Optional[int] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[Any]=3 ,_UpperCAmelCase : List[Any]=300 ,_UpperCAmelCase : Dict=6 ,_UpperCAmelCase : List[str]=2048 ,_UpperCAmelCase : Optional[int]=8 ,_UpperCAmelCase : List[Any]=6 ,_UpperCAmelCase : Optional[int]=2048 ,_UpperCAmelCase : Dict=8 ,_UpperCAmelCase : int=0.0 ,_UpperCAmelCase : Optional[Any]=0.0 ,_UpperCAmelCase : Optional[Any]=True ,_UpperCAmelCase : str="relu" ,_UpperCAmelCase : Tuple=256 ,_UpperCAmelCase : Optional[int]=0.1 ,_UpperCAmelCase : str=0.0 ,_UpperCAmelCase : Optional[int]=0.0 ,_UpperCAmelCase : Union[str, Any]=0.02 ,_UpperCAmelCase : List[str]=1.0 ,_UpperCAmelCase : Any=False ,_UpperCAmelCase : int="sine" ,_UpperCAmelCase : List[str]="resnet50" ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : str=False ,_UpperCAmelCase : str=2 ,_UpperCAmelCase : int=5 ,_UpperCAmelCase : Optional[int]=2 ,_UpperCAmelCase : str=1 ,_UpperCAmelCase : Union[str, Any]=1 ,_UpperCAmelCase : List[str]=2 ,_UpperCAmelCase : Union[str, Any]=5 ,_UpperCAmelCase : List[Any]=2 ,_UpperCAmelCase : Optional[int]=0.25 ,**_UpperCAmelCase : Tuple ,): if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t 
specify both `backbone_config` and `use_timm_backbone`.' ) if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' ) _a : Optional[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'] ) elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ): _a : str = backbone_config.get('model_type' ) _a : Union[str, Any] = CONFIG_MAPPING[backbone_model_type] _a : List[Any] = config_class.from_dict(_UpperCAmelCase ) _a : Tuple = use_timm_backbone _a : Union[str, Any] = backbone_config _a : List[Any] = num_channels _a : Union[str, Any] = num_queries _a : Optional[Any] = d_model _a : Tuple = encoder_ffn_dim _a : Dict = encoder_layers _a : List[str] = encoder_attention_heads _a : Union[str, Any] = decoder_ffn_dim _a : Optional[int] = decoder_layers _a : int = decoder_attention_heads _a : Optional[int] = dropout _a : Tuple = attention_dropout _a : List[Any] = activation_dropout _a : str = activation_function _a : Optional[Any] = init_std _a : Union[str, Any] = init_xavier_std _a : List[Any] = encoder_layerdrop _a : List[Any] = decoder_layerdrop _a : Dict = encoder_layers _a : List[Any] = auxiliary_loss _a : Optional[int] = position_embedding_type _a : List[Any] = backbone _a : Optional[int] = use_pretrained_backbone _a : Optional[int] = dilation # Hungarian matcher _a : Tuple = class_cost _a : str = bbox_cost _a : Any = giou_cost # Loss coefficients _a : Tuple = mask_loss_coefficient _a : Dict = dice_loss_coefficient _a : Tuple = cls_loss_coefficient _a : Any = bbox_loss_coefficient _a : Dict = giou_loss_coefficient _a : Union[str, Any] = focal_alpha super().__init__(is_encoder_decoder=_UpperCAmelCase ,**_UpperCAmelCase ) @property def __lowercase ( self : Dict ): return self.encoder_attention_heads @property def __lowercase ( self : str ): return self.d_model def __lowercase ( self : int ): _a : List[str] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: _a : Dict = 
self.backbone_config.to_dict() _a : Union[str, Any] = self.__class__.model_type return output class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : str = version.parse('1.11' ) @property def __lowercase ( self : Dict ): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'}), ] ) @property def __lowercase ( self : Any ): return 1E-5 @property def __lowercase ( self : List[Any] ): return 12
89
1
"""
Fully working example showing how to properly calculate metrics on the
validation dataset in a distributed system with Accelerate; builds off the
`nlp_example.py` script.

Trains a BERT-base model on GLUE MRPC in any of the following settings with
the same script: single CPU or single GPU, multi-GPU (PyTorch distributed
mode), (multi) TPUs, fp16 (mixed-precision) or fp32 (normal precision).

New additions from the base script can be found quickly by looking for the
`# New Code #` tags. To run it in each of these various modes, follow the
instructions in the readme for examples:
https://github.com/huggingface/accelerate/tree/main/examples
"""
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


# Above this training batch size we switch to gradient accumulation (except on TPU).
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16):
    """
    Create train/validation dataloaders for GLUE MRPC tokenized with bert-base-cased.

    Args:
        accelerator: the `Accelerator` object; used to serialize preprocessing
            across processes and to choose the padding strategy.
        batch_size (int, optional): batch size of the train dataloader.

    Returns:
        tuple: `(train_dataloader, eval_dataloader)`.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of
    # the dataset, starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for
    # labels by the models of the transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders. Only the training set is shuffled.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Run the full train/eval loop for the hyper-parameters in `config`."""
    # For testing only: shorten the run under the mocked dataloaders.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before
    # the optimizer creation otherwise training will not work on TPU (`accelerate` will kindly throw an
    # error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything. There is no specific order to remember, we just need to
    # unpack the objects in the same order we gave them to the prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)


def main():
    """Parse CLI flags and launch the training loop with default hyper-parameters."""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision',
        type=str,
        default=None,
        choices=['no', 'fp16', 'bf16', 'fp8'],
        help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.',
    )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
89
"""Testing suite for the PyTorch ConvNextV2 model."""
import inspect
import unittest

from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
    from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextVaModelTester:
    """Builds tiny configs and random inputs for fast ConvNextV2 forward/backward checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return `(config, pixel_values, labels)`; labels are None when `use_labels` is False."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ConvNextV2 does
    not use input_ids/inputs_embeds, attention masks or head masks.
    """

    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats fixture used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
89
1
'''simple docstring''' from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class __magic_name__ : pass
89
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase = { '''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LiltForQuestionAnswering''', '''LiltForSequenceClassification''', '''LiltForTokenClassification''', '''LiltModel''', '''LiltPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
89
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __lowerCAmelCase = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = ['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
89
'''simple docstring''' import math def __lowerCamelCase ( lowerCAmelCase_ ) -> bool: _a : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(lowerCAmelCase_ ) def __lowerCamelCase ( lowerCAmelCase_ = 1 / 12345 ) -> int: _a : int = 0 _a : Optional[Any] = 0 _a : int = 3 while True: _a : Tuple = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(lowerCAmelCase_ ): _a : Union[str, Any] = int(lowerCAmelCase_ ) total_partitions += 1 if check_partition_perfect(lowerCAmelCase_ ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(lowerCAmelCase_ ) integer += 1 if __name__ == "__main__": print(f"""{solution() = }""")
89
1
'''simple docstring''' from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig __lowerCAmelCase = logging.get_logger(__name__) # General docstring __lowerCAmelCase = '''RegNetConfig''' # Base docstring __lowerCAmelCase = '''facebook/regnet-y-040''' __lowerCAmelCase = [1, 1_088, 7, 7] # Image classification docstring __lowerCAmelCase = '''facebook/regnet-y-040''' __lowerCAmelCase = '''tabby, tabby cat''' __lowerCAmelCase = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class __magic_name__ ( nn.Module ): def __init__( self : Optional[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int = 3 ,_UpperCAmelCase : int = 1 ,_UpperCAmelCase : int = 1 ,_UpperCAmelCase : Optional[str] = "relu" ,): super().__init__() _a : Optional[int] = nn.Convad( _UpperCAmelCase ,_UpperCAmelCase ,kernel_size=_UpperCAmelCase ,stride=_UpperCAmelCase ,padding=kernel_size // 2 ,groups=_UpperCAmelCase ,bias=_UpperCAmelCase ,) _a : Any = nn.BatchNormad(_UpperCAmelCase ) _a : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity() def __lowercase ( self : int ,_UpperCAmelCase : List[str] ): _a : Dict = self.convolution(_UpperCAmelCase ) _a : List[str] = self.normalization(_UpperCAmelCase ) _a : List[Any] = self.activation(_UpperCAmelCase ) return hidden_state class __magic_name__ ( nn.Module ): def __init__( self : Any ,_UpperCAmelCase : RegNetConfig ): super().__init__() _a : 
List[Any] = RegNetConvLayer( config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ) _a : List[str] = config.num_channels def __lowercase ( self : str ,_UpperCAmelCase : Tuple ): _a : Tuple = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) _a : Optional[int] = self.embedder(_UpperCAmelCase ) return hidden_state class __magic_name__ ( nn.Module ): def __init__( self : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int = 2 ): super().__init__() _a : str = nn.Convad(_UpperCAmelCase ,_UpperCAmelCase ,kernel_size=1 ,stride=_UpperCAmelCase ,bias=_UpperCAmelCase ) _a : Optional[Any] = nn.BatchNormad(_UpperCAmelCase ) def __lowercase ( self : Dict ,_UpperCAmelCase : Tensor ): _a : int = self.convolution(_UpperCAmelCase ) _a : Any = self.normalization(_UpperCAmelCase ) return hidden_state class __magic_name__ ( nn.Module ): def __init__( self : Optional[int] ,_UpperCAmelCase : int ,_UpperCAmelCase : int ): super().__init__() _a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) _a : int = nn.Sequential( nn.Convad(_UpperCAmelCase ,_UpperCAmelCase ,kernel_size=1 ) ,nn.ReLU() ,nn.Convad(_UpperCAmelCase ,_UpperCAmelCase ,kernel_size=1 ) ,nn.Sigmoid() ,) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Optional[int] ): # b c h w -> b c 1 1 _a : int = self.pooler(_UpperCAmelCase ) _a : Dict = self.attention(_UpperCAmelCase ) _a : List[str] = hidden_state * attention return hidden_state class __magic_name__ ( nn.Module ): def __init__( self : List[Any] ,_UpperCAmelCase : RegNetConfig ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int = 1 ): super().__init__() _a : str = in_channels != out_channels or stride != 1 _a : int = max(1 ,out_channels // config.groups_width ) _a : Tuple = ( RegNetShortCut(_UpperCAmelCase ,_UpperCAmelCase ,stride=_UpperCAmelCase 
) if should_apply_shortcut else nn.Identity() ) _a : Any = nn.Sequential( RegNetConvLayer(_UpperCAmelCase ,_UpperCAmelCase ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_UpperCAmelCase ,_UpperCAmelCase ,stride=_UpperCAmelCase ,groups=_UpperCAmelCase ,activation=config.hidden_act ) ,RegNetConvLayer(_UpperCAmelCase ,_UpperCAmelCase ,kernel_size=1 ,activation=_UpperCAmelCase ) ,) _a : Any = ACTaFN[config.hidden_act] def __lowercase ( self : List[Any] ,_UpperCAmelCase : str ): _a : str = hidden_state _a : str = self.layer(_UpperCAmelCase ) _a : Tuple = self.shortcut(_UpperCAmelCase ) hidden_state += residual _a : Optional[Any] = self.activation(_UpperCAmelCase ) return hidden_state class __magic_name__ ( nn.Module ): def __init__( self : Any ,_UpperCAmelCase : RegNetConfig ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int = 1 ): super().__init__() _a : Optional[int] = in_channels != out_channels or stride != 1 _a : str = max(1 ,out_channels // config.groups_width ) _a : Dict = ( RegNetShortCut(_UpperCAmelCase ,_UpperCAmelCase ,stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity() ) _a : int = nn.Sequential( RegNetConvLayer(_UpperCAmelCase ,_UpperCAmelCase ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(_UpperCAmelCase ,_UpperCAmelCase ,stride=_UpperCAmelCase ,groups=_UpperCAmelCase ,activation=config.hidden_act ) ,RegNetSELayer(_UpperCAmelCase ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(_UpperCAmelCase ,_UpperCAmelCase ,kernel_size=1 ,activation=_UpperCAmelCase ) ,) _a : Optional[int] = ACTaFN[config.hidden_act] def __lowercase ( self : Dict ,_UpperCAmelCase : Any ): _a : List[str] = hidden_state _a : Tuple = self.layer(_UpperCAmelCase ) _a : Dict = self.shortcut(_UpperCAmelCase ) hidden_state += residual _a : List[str] = self.activation(_UpperCAmelCase ) return hidden_state class __magic_name__ ( nn.Module ): def __init__( self : Any ,_UpperCAmelCase : RegNetConfig 
,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int = 2 ,_UpperCAmelCase : int = 2 ,): super().__init__() _a : Dict = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer _a : Any = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,stride=_UpperCAmelCase ,) ,*[layer(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) for _ in range(depth - 1 )] ,) def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : Optional[Any] ): _a : Union[str, Any] = self.layers(_UpperCAmelCase ) return hidden_state class __magic_name__ ( nn.Module ): def __init__( self : str ,_UpperCAmelCase : RegNetConfig ): super().__init__() _a : Dict = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( _UpperCAmelCase ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) ) _a : int = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(_UpperCAmelCase ,config.depths[1:] ): self.stages.append(RegNetStage(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,depth=_UpperCAmelCase ) ) def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : Tensor ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : bool = True ): _a : Optional[int] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _a : str = hidden_states + (hidden_state,) _a : Any = stage_module(_UpperCAmelCase ) if output_hidden_states: _a : str = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase ,hidden_states=_UpperCAmelCase ) class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : int = RegNetConfig lowerCAmelCase : int = 
'regnet' lowerCAmelCase : Optional[int] = 'pixel_values' lowerCAmelCase : List[Any] = True def __lowercase ( self : Dict ,_UpperCAmelCase : Dict ): if isinstance(_UpperCAmelCase ,nn.Convad ): nn.init.kaiming_normal_(module.weight ,mode='fan_out' ,nonlinearity='relu' ) elif isinstance(_UpperCAmelCase ,(nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight ,1 ) nn.init.constant_(module.bias ,0 ) def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Dict=False ): if isinstance(_UpperCAmelCase ,_UpperCAmelCase ): _a : Union[str, Any] = value __lowerCAmelCase = r''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' __lowerCAmelCase = r''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' 
, _UpperCamelCase , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class __magic_name__ ( _UpperCamelCase ): def __init__( self : Optional[Any] ,_UpperCAmelCase : Any ): super().__init__(_UpperCAmelCase ) _a : List[Any] = config _a : int = RegNetEmbeddings(_UpperCAmelCase ) _a : str = RegNetEncoder(_UpperCAmelCase ) _a : Optional[int] = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_UpperCAmelCase ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def __lowercase ( self : List[str] ,_UpperCAmelCase : Tensor ,_UpperCAmelCase : Optional[bool] = None ,_UpperCAmelCase : Optional[bool] = None ): _a : Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _a : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict _a : List[str] = self.embedder(_UpperCAmelCase ) _a : Tuple = self.encoder( _UpperCAmelCase ,output_hidden_states=_UpperCAmelCase ,return_dict=_UpperCAmelCase ) _a : List[Any] = encoder_outputs[0] _a : int = self.pooler(_UpperCAmelCase ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_UpperCAmelCase ,pooler_output=_UpperCAmelCase ,hidden_states=encoder_outputs.hidden_states ,) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , _UpperCamelCase , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class __magic_name__ ( _UpperCamelCase ): def __init__( self : Union[str, Any] ,_UpperCAmelCase : Dict ): super().__init__(_UpperCAmelCase ) _a : List[Any] = config.num_labels _a : Any = RegNetModel(_UpperCAmelCase ) # classification head _a : Union[str, Any] = nn.Sequential( nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_UpperCAmelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def __lowercase ( self : Dict ,_UpperCAmelCase : Optional[torch.FloatTensor] = None ,_UpperCAmelCase : Optional[torch.LongTensor] = None ,_UpperCAmelCase : Optional[bool] = None ,_UpperCAmelCase : Optional[bool] = None ,): _a : Any = return_dict if return_dict is not None else self.config.use_return_dict _a : Optional[int] = self.regnet(_UpperCAmelCase ,output_hidden_states=_UpperCAmelCase ,return_dict=_UpperCAmelCase ) _a : Optional[int] = outputs.pooler_output if return_dict else outputs[1] _a : List[Any] = self.classifier(_UpperCAmelCase ) _a : Dict = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _a : Tuple = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _a : Any = 'single_label_classification' else: _a : Union[str, Any] = 'multi_label_classification' if self.config.problem_type == "regression": _a : List[str] = MSELoss() if self.num_labels == 1: _a : int = loss_fct(logits.squeeze() ,labels.squeeze() ) else: _a : Union[str, Any] = loss_fct(_UpperCAmelCase ,_UpperCAmelCase ) elif self.config.problem_type == 
"single_label_classification": _a : List[Any] = CrossEntropyLoss() _a : Tuple = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _a : List[Any] = BCEWithLogitsLoss() _a : int = loss_fct(_UpperCAmelCase ,_UpperCAmelCase ) if not return_dict: _a : int = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=_UpperCAmelCase ,logits=_UpperCAmelCase ,hidden_states=outputs.hidden_states )
89
"""Convert an original LDM (CompVis latent-diffusion) UNet checkpoint to the diffusers format."""
import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Remove `n_shave_prefix_segments` dot-separated segments from the start of `path`.

    A negative value removes segments from the end instead.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Translate old resnet parameter names to diffusers names.

    Returns a list of ``{"old": ..., "new": ...}`` dicts, one per input name.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Translate old attention parameter names to diffusers names.

    Returns a list of ``{"old": ..., "new": ...}`` dicts, one per input name.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def assign_to_checkpoint(
    paths, new_checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Copy tensors from `old_checkpoint` into `new_checkpoint` following the renaming in `paths`.

    `attention_paths_to_split` maps fused qkv parameter names to the three split
    (query/key/value) target names; `additional_replacements` is a list of
    ``{"old": ..., "new": ...}`` substring substitutions applied to each new path.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            new_checkpoint[path_map["query"]] = query.reshape(target_shape)
            new_checkpoint[path_map["key"]] = key.reshape(target_shape)
            new_checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            new_checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            new_checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    """Convert the full original-LDM UNet state dict to the diffusers `UNet2DModel` layout."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        # A block that only holds the downsampling conv gets copied directly.
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            # Blocks without attentions: plain renaming with one prefix segment shaved.
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
89
1
"""Table Transformer model configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    """Configuration for a Table Transformer (DETR-style) model.

    Stores encoder/decoder sizes, backbone choice, and the Hungarian-matcher /
    loss coefficients used for detection training.
    """

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        # Alias required by `attribute_map`.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Alias required by `attribute_map`.
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Table Transformer."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
89
"""Audio helpers: decode/stream audio through a local `ffmpeg` subprocess."""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode an arbitrary audio byte payload to a mono float32 array via ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Yield raw audio byte chunks captured from the default microphone via ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    # Capture device/driver differs per OS.
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as overlapping numpy chunks with left/right strides.

    Chunks that arrive too late (more than 10 chunk-durations behind real time)
    are dropped to keep the stream near-live.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into fixed-size chunks with (left, right) byte strides.

    When `stream=True`, partial chunks are yielded early with ``"partial": True``.
    The first chunk always has a left stride of 0; the trailing remainder is
    yielded if it is longer than the left stride.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Run `ffmpeg_command` and yield its stdout in `buflen`-byte reads until EOF."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
89
1
"""Lazy import structure for the Funnel Transformer model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base structure: always-importable submodules.
_import_structure = {
    "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
    "convert_funnel_original_tf_checkpoint_to_pytorch": [],
    "tokenization_funnel": ["FunnelTokenizer"],
}

# Optional submodules, registered only when their backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_funnel"] = [
        "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FunnelBaseModel",
        "FunnelForMaskedLM",
        "FunnelForMultipleChoice",
        "FunnelForPreTraining",
        "FunnelForQuestionAnswering",
        "FunnelForSequenceClassification",
        "FunnelForTokenClassification",
        "FunnelModel",
        "FunnelPreTrainedModel",
        "load_tf_weights_in_funnel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_funnel"] = [
        "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFFunnelBaseModel",
        "TFFunnelForMaskedLM",
        "TFFunnelForMultipleChoice",
        "TFFunnelForPreTraining",
        "TFFunnelForQuestionAnswering",
        "TFFunnelForSequenceClassification",
        "TFFunnelForTokenClassification",
        "TFFunnelModel",
        "TFFunnelPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
    from .tokenization_funnel import FunnelTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_funnel_fast import FunnelTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_funnel import (
            FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            FunnelBaseModel,
            FunnelForMaskedLM,
            FunnelForMultipleChoice,
            FunnelForPreTraining,
            FunnelForQuestionAnswering,
            FunnelForSequenceClassification,
            FunnelForTokenClassification,
            FunnelModel,
            FunnelPreTrainedModel,
            load_tf_weights_in_funnel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_funnel import (
            TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFFunnelBaseModel,
            TFFunnelForMaskedLM,
            TFFunnelForMultipleChoice,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForSequenceClassification,
            TFFunnelForTokenClassification,
            TFFunnelModel,
            TFFunnelPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
"""Breadth-first search helpers: shortest path and shortest path distance."""

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Return one shortest path from `start` to `goal` as a list of nodes.

    Returns ``[]`` when no path exists; ``[start]`` when start == goal.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on the shortest path from `start` to `target`.

    Returns ``-1`` when either node is missing or unreachable, ``0`` when
    start == target.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
89
1
"""Lazy import structure for the CTRL model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Base structure: always-importable submodules.
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

# Optional submodules, registered only when their backend is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
"""Lazy import structure for the Swin Transformer model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Base structure: always-importable submodules.
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

# Optional submodules, registered only when their backend is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
1
"""Accelerate example: GLUE/MRPC fine-tuning with DeepSpeed-aware checkpointing.

Fix: the collapsed original referenced names that were never bound
(``datasets``, ``outputs``, ``tokenized_datasets``, ``accelerator`` inside
``get_dataloaders``; every local was shadowed into ``_a``), so the script
raised NameError immediately. Restored coherent local names throughout.
"""
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Build train/eval dataloaders for GLUE MRPC.

    Args:
        accelerator (Accelerator): used to decide TPU-specific padding.
        batch_size (int): per-device training batch size.
        model_name (str): model id whose tokenizer is used.

    Returns:
        (train_dataloader, eval_dataloader) tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # Rename the 'label' column to 'labels', the expected name for labels by the models
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    """Run one evaluation pass and return the MRPC accuracy."""
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather((predictions, batch["labels"]))
        # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)
    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    """Train with checkpoint save/resume; asserts state consistency on resume."""
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer: a DeepSpeed-configured optimizer is replaced by a dummy.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (dummy when DeepSpeed provides its own)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything: no specific order, just unpack in the same order given.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs
    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = os.path.join(args.output_dir, f"epoch_{epoch}")
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    """CLI entry point: parse arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
89
"""Integration tests for the Barthez tokenizer.

Fix: the collapsed original subclassed the undefined name ``_UpperCamelCase``,
gave every method the same name ``__lowercase`` (later defs silently overwrote
earlier ones, and ``setUp`` was never registered), and referenced undefined
locals ``_UpperCAmelCase``. Restored coherent class base, method and local names.
"""
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Round-trip a known token/id pair through the vocabulary."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]
        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
89
1
"""Convert an original (X)ProphetNet checkpoint to the transformers format.

Fixes:
- The collapsed original referenced undefined names (``prophet``, ``old_model``,
  etc. were all shadowed into placeholder variables); restored coherent names.
- The two ``param.weight.shape == ...`` / ``param.bias.shape == ...`` lines were
  bare expressions whose results were discarded — turned them into real asserts
  so shape mismatches actually fail instead of being silently ignored.
"""
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging

logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path, pytorch_dump_folder_path):
    """Copy weights reported as missing from the old checkpoint into the new model and save it."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Attention projections packed into a single in_proj tensor in the old model.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # New attribute name -> old attribute name ('' means "stay at the same object").
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                # Fall back to the new name when the mapped old name is absent.
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # Slice q/k/v out of the packed in_proj tensor.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert (
                    param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape
                ), "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Descend one level in both module trees.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
89
"""Tests for transformers offline mode (TRANSFORMERS_OFFLINE=1).

Fix: the collapsed original subclassed the undefined name ``_UpperCamelCase``,
named every method ``__lowercase`` (so later definitions overwrote earlier
ones), and used undefined ``_UpperCAmelCase`` placeholders for locals and
keyword values. Restored coherent names; all subprocess one-liner strings are
kept byte-for-byte.
"""
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
89
1
"""Tests for the Flax AutoencoderKL model.

Fix: the collapsed original subclassed the undefined name ``_UpperCamelCase``
and named both methods ``__lowercase`` (the second overwrote the first, and
neither matched the hook names the mixin expects). Restored the mixin base
and the conventional ``dummy_input`` / ``prepare_init_args_and_inputs_for_common``
names.
"""
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin

if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        """A deterministic random image batch plus the PRNG key used to draw it."""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
89
"""Project Euler problem 12: first triangle number with more than 500 divisors.

Fix: the collapsed original defined all three functions under the same
obfuscated name ``__lowerCamelCase`` (each definition overwrote the previous
one) and referenced undefined locals (``i``, ``divisors_count``,
``multiplicity``) and the undefined ``solution``. Restored coherent names.
"""


def triangle_number_generator():
    """Yield successive triangle numbers t_n = n * (n + 1) / 2."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Return the number of divisors of ``n`` via its prime factorisation.

    d(p1^a1 * ... * pk^ak) = (a1 + 1) * ... * (ak + 1).
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:  # leftover prime factor contributes exponent 1
        divisors_count *= 2
    return divisors_count


def solution() -> int:
    """Return the first triangle number having more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
89
1
"""LiLT model configuration.

Fix: the collapsed original clobbered the logger with the archive-map dict
(both assigned to ``__lowerCAmelCase``), subclassed the undefined name
``_UpperCamelCase``, and gave every ``__init__`` parameter the same placeholder
name. Restored the canonical names (grounded in the ``'lilt'`` model type and
the attribute assignments in the body).
"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    """Configuration for a LiLT model; defaults follow the base checkpoint."""

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
89
"""SQL input/output streams for ``datasets``.

Fix: the collapsed original named BOTH classes ``__magic_name__`` (the writer
silently overwrote the reader), imported the non-existent module ``sqlitea``
instead of ``sqlite3``, and used undefined placeholder locals. Restored
coherent names.
"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream

if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    """Builds a ``Dataset`` from the result of a SQL query/table."""

    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        """Materialise the query result as a ``Dataset`` (single 'train' split)."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    """Writes a ``Dataset`` to a SQL table in batches, optionally in parallel."""

    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        """Write the whole dataset; returns the number of rows written."""
        # These keys are consumed here, not forwarded to pandas' to_sql.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one slice of the dataset; returns rows written for that slice."""
        offset, index, to_sql_kwargs = args
        # Only the first batch may create/replace the table; the rest append.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Dispatch batch writes sequentially or across a process pool."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
89
1
"""Columnar transposition cipher.

Fix: the reviewed text defined all three functions under one mangled name
while calling them as ``main``/``encrypt_message``/``decrypt_message``,
which raised NameError at runtime; the callable names are restored.
"""
import math


def main() -> None:
    """Interactive driver: prompt for a message, key and mode, print the result."""
    message = input("Enter message: ")
    key = int(input(f"""Enter key [2-{len(message) - 1}]: """))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"""Output:\n{text + '|'}""")


def encrypt_message(key: int, message: str) -> str:
    """Read `message` column-wise with stride `key`.

    >>> encrypt_message(6, 'Harshil Darji')
    'Hlia rDsahrij'
    """
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        # Collect every key-th character starting at this column.
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """Invert `encrypt_message` for the same `key`.

    >>> decrypt_message(6, 'Hlia rDsahrij')
    'Harshil Darji'
    """
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    # Cells of the grid that stay empty because the message does not fill it.
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        # Wrap to the next row; the last column is skipped on shaded rows.
        # (Precedence is intentional: `or` binds looser than `and`.)
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
89
"""Demonstrate affine rotations of an image with OpenCV and matplotlib.

Fixes relative to the reviewed text: the digit-mangled module ``cva`` is
restored to ``cv2`` and ``np.floataa`` to ``np.float32``; the function and
script variables get back the names their use sites reference
(``get_rotation``, ``image``, ``gray_img``, ``img_rows``/``img_cols``,
``pts1``..``pts4``), which previously raised NameError.
"""
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` with the affine transform mapping triangle `pt1` onto `pt2`."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


# read original image
image = cv2.imread(
    str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
)
# turn image in gray scale value
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# get image shape
img_rows, img_cols = gray_img.shape

# set different points to rotate image
pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

# add all rotated images in a list
images = [
    gray_img,
    get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
    get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
    get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
]

# plot different image rotations
fig = plt.figure(1)
titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, img in enumerate(images):
    plt.subplot(2, 2, i + 1), plt.imshow(img, "gray")
    plt.title(titles[i])
    plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
89
1
"""AutoImageProcessor: resolve and instantiate the right image processor.

Fixes relative to the reviewed text: the module-level constants and
functions referenced throughout (``IMAGE_PROCESSOR_MAPPING_NAMES``,
``IMAGE_PROCESSOR_MAPPING``, ``logger``, ``image_processor_class_from_name``)
were bound to dummy names, causing NameError; the decorator argument of
``from_pretrained`` was an undefined name at class scope; the duplicate
``('mobilevit', ...)`` mapping entry is removed; and the two class methods
no longer share one name (the second was shadowing the first).
"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)

logger = logging.get_logger(__name__)

# model_type -> image processor class name
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ("align", "EfficientNetImageProcessor"),
        ("beit", "BeitImageProcessor"),
        ("bit", "BitImageProcessor"),
        ("blip", "BlipImageProcessor"),
        ("blip-2", "BlipImageProcessor"),
        ("bridgetower", "BridgeTowerImageProcessor"),
        ("chinese_clip", "ChineseCLIPImageProcessor"),
        ("clip", "CLIPImageProcessor"),
        ("clipseg", "ViTImageProcessor"),
        ("conditional_detr", "ConditionalDetrImageProcessor"),
        ("convnext", "ConvNextImageProcessor"),
        ("convnextv2", "ConvNextImageProcessor"),
        ("cvt", "ConvNextImageProcessor"),
        ("data2vec-vision", "BeitImageProcessor"),
        ("deformable_detr", "DeformableDetrImageProcessor"),
        ("deit", "DeiTImageProcessor"),
        ("deta", "DetaImageProcessor"),
        ("detr", "DetrImageProcessor"),
        ("dinat", "ViTImageProcessor"),
        ("donut-swin", "DonutImageProcessor"),
        ("dpt", "DPTImageProcessor"),
        ("efficientformer", "EfficientFormerImageProcessor"),
        ("efficientnet", "EfficientNetImageProcessor"),
        ("flava", "FlavaImageProcessor"),
        ("focalnet", "BitImageProcessor"),
        ("git", "CLIPImageProcessor"),
        ("glpn", "GLPNImageProcessor"),
        ("groupvit", "CLIPImageProcessor"),
        ("imagegpt", "ImageGPTImageProcessor"),
        ("instructblip", "BlipImageProcessor"),
        ("layoutlmv2", "LayoutLMv2ImageProcessor"),
        ("layoutlmv3", "LayoutLMv3ImageProcessor"),
        ("levit", "LevitImageProcessor"),
        ("mask2former", "Mask2FormerImageProcessor"),
        ("maskformer", "MaskFormerImageProcessor"),
        ("mgp-str", "ViTImageProcessor"),
        ("mobilenet_v1", "MobileNetV1ImageProcessor"),
        ("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
        ("mobilevitv2", "MobileViTImageProcessor"),
        ("nat", "ViTImageProcessor"),
        ("oneformer", "OneFormerImageProcessor"),
        ("owlvit", "OwlViTImageProcessor"),
        ("perceiver", "PerceiverImageProcessor"),
        ("pix2struct", "Pix2StructImageProcessor"),
        ("poolformer", "PoolFormerImageProcessor"),
        ("regnet", "ConvNextImageProcessor"),
        ("resnet", "ConvNextImageProcessor"),
        ("sam", "SamImageProcessor"),
        ("segformer", "SegformerImageProcessor"),
        ("swiftformer", "ViTImageProcessor"),
        ("swin", "ViTImageProcessor"),
        ("swin2sr", "Swin2SRImageProcessor"),
        ("swinv2", "ViTImageProcessor"),
        ("table-transformer", "DetrImageProcessor"),
        ("timesformer", "VideoMAEImageProcessor"),
        ("tvlt", "TvltImageProcessor"),
        ("upernet", "SegformerImageProcessor"),
        ("van", "ConvNextImageProcessor"),
        ("videomae", "VideoMAEImageProcessor"),
        ("vilt", "ViltImageProcessor"),
        ("vit", "ViTImageProcessor"),
        ("vit_hybrid", "ViTHybridImageProcessor"),
        ("vit_mae", "ViTImageProcessor"),
        ("vit_msn", "ViTImageProcessor"),
        ("xclip", "CLIPImageProcessor"),
        ("yolos", "YolosImageProcessor"),
    ]
)

IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)


def image_processor_class_from_name(class_name):
    """Resolve an image processor class object from its string name, or None."""
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f""".{module_name}""", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    # Also look at user-registered (extra-content) processors.
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the image processor config dict from a local dir or the Hub."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoImageProcessor:
    """Factory that instantiates the correct image processor for a checkpoint."""

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the image processor class matching the checkpoint config."""
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
            f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}"""
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new (config class, image processor class) pair."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
89
"""Lazy-import module init for BigBird-Pegasus.

Fixes relative to the reviewed text: the import-structure dict was bound to
a dummy name while `_LazyModule(...)` referenced `_import_structure`
(NameError at import); the modeling symbols were assigned to a throwaway
name instead of registered under the "modeling_bigbird_pegasus" key, so the
lazy module would never expose them; and the constructed lazy module is
installed into `sys.modules` as the canonical pattern requires.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# submodule name -> public names it provides (resolved lazily on attribute access)
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
1
"""Convert OpenAI Jukebox checkpoints to the Transformers format.

Fixes relative to the reviewed text: module constants (`PREFIX`,
`MODEL_MAPPING`) and the three functions were bound to dummy names while
every call site used the real names (NameError); inside `fix_jukebox_keys`
all bindings (compiled regexes, `new_dict`, `key`, `groups`, …) had been
collapsed onto one dummy local, leaving `return new_dict` and every regex
reference unbound. Bindings are restored from their use sites.
"""
import argparse
import json
import os
from pathlib import Path

import requests
import torch

from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}


def replace_key(key):
    """Map a single OpenAI state-dict key to its Transformers equivalent."""
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")
    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key


def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rename every key in `state_dict` to match the HF model's state dict.

    `mapping` (new key -> original key) is filled in place for bookkeeping;
    returns the renamed state dict.
    """
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
            print(f"""failed converting {original_key} to {key}, does not match""")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
            val = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict


@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the OpenAI weights (if absent), rename them and save an HF model."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}"""):
            r = requests.get(f"""{PREFIX}{file}""", allow_redirects=True)
            os.makedirs(f"""{pytorch_dump_folder_path}/""", exist_ok=True)
            open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]
    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        # First checkpoint is the VQ-VAE; the rest are priors (in reverse order).
        key_prefix = "vqvae" if i == 0 else f"""priors.{3 - i}"""
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"""{pytorch_dump_folder_path}/mapping.json""", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
89
"""Precompute and pickle per-example token lengths for a Seq2Seq dataset.

Fix relative to the reviewed text: `fire.Fire(save_len_file)` referenced an
undefined name (the function had been renamed by obfuscation) and every
local binding had been collapsed onto a dummy name; both are restored from
the use sites.
"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Tokenize train/val splits and pickle their lengths to each dataset's `len_file`.

    When `consider_target` is True the saved length is max(src_len, tgt_len),
    otherwise just the source length.
    """
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        # shuffle=False keeps lengths aligned with dataset order.
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            # Count non-pad tokens per example.
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
89
1
"""Trainer subclass that post-processes predictions before computing metrics.

Fixes relative to the reviewed text: both method signatures repeated the
same parameter name (`_UpperCAmelCase`) four times — a SyntaxError — while
their bodies referenced the real names (`eval_dataset`, `eval_examples`,
`ignore_keys`, `metric_key_prefix`, …); and both methods shared one mangled
name so the second definition shadowed the first. The evident intended
names (`evaluate`/`predict`, overriding `Trainer`) and parameter bindings
are restored from the use sites.
"""
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Raw (un-featurized) examples and the fn turning raw logits into answers.
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """Run the evaluation loop, post-process predictions, and log metrics."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Restore even if the loop raises.
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"""{metric_key_prefix}_"""):
                    metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """Run prediction, post-process answers, and return a PredictionOutput."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics

        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"""{metric_key_prefix}_"""):
                metrics[f"""{metric_key_prefix}_{key}"""] = metrics.pop(key)
        metrics.update(output.metrics)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
89
"""Singly linked list supporting index access, insertion, deletion and reversal."""
from typing import Any


class Node:
    """A single linked-list node holding ``data`` and a pointer to the next node."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None  # set when the node is linked into a list

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """Singly linked list keeping only a ``head`` reference; length is O(n)."""

    def __init__(self) -> None:
        self.head = None

    def __iter__(self):
        """Yield the data of each node from head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        """Number of nodes, computed by walking the list (O(n))."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        """Arrow-joined string of all node data, e.g. ``1->2->3``."""
        return "->".join(str(item) for item in self)

    def __getitem__(self, index: int) -> Any:
        """Return the data at ``index``; raises ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Replace the data stored at ``index``; raises ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        """Append ``data`` at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend ``data`` at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` before position ``index`` (0..len inclusive)."""
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        """Remove and return the first element."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        """Remove and return the last element."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the element at ``index``; raises IndexError when invalid."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        """True when the list has no nodes."""
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place by re-pointing every ``next`` link."""
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    """Exercise insertion, deletion, indexing and reversal with integers."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Exercise the list with heterogeneous data (ints, strings, Nodes, None, floats)."""
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55_555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    """Interactive demo driven by user input."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f"""Element at Position 1: {linked_list[1]}""")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f"""length of linked_list is : {len(linked_list)}""")


if __name__ == "__main__":
    main()
89
1
"""Convert MobileNetV1 checkpoints from the TensorFlow models library to 🤗 format."""

import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    """Build a MobileNetV1Config from a checkpoint name like ``mobilenet_v1_1.0_224``.

    The depth multiplier and image size are parsed out of the name; quantized
    checkpoints are rejected because their weight layout is not supported.
    """
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError('Quantized models are not supported.')

    matches = re.match(r'^mobilenet_v1_([^_]*)_([^_]*)$', model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = 'imagenet-1k-id2label.json'
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    # Shift every ImageNet id by one to make room for the "background" class at 0.
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = 'background'
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    """Download the standard COCO cats test image used to verify conversions."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the TensorFlow checkpoint weights into our MobileNetV1 structure,
    sanity-check the logits on a test image, and save (optionally push) the result.
    """
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={'width': config.image_size, 'height': config.image_size},
        size={'shortest_edge': config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    # Reference logits for the known checkpoints; other sizes skip the value check.
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1_739, -1.1_233, 3.1_205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9_440, -2.3_141, -0.3_333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('Pushing to the hub...')
        repo_id = 'google/' + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='mobilenet_v1_1.0_224',
        type=str,
        help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
    )
    parser.add_argument(
        '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
89
"""Run doctest over transformers modules and documentation files."""

import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip('Temporarily disable the doc tests.')
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """
        Runs through the specific directory, checking all files with doctest.

        Args:
            directory: directory containing the files to check
            identifier: keep only files whose name contains this substring
            ignore_files: file names to skip ('__init__.py' is always skipped)
            n_identifier: substring(s) that must NOT appear in kept file names
            only_modules: when True, resolve each file as a transformers module
                and run its DocTestSuite; otherwise run doctest.testfile on it.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, List):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing', file)

            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    # Not every file maps to an importable module; skip those.
                    logger.info(F"""{module_identifier} is not a module.""")
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'tokenization'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'configuration'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_no_fail_examples(self):
        transformers_directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_documentation_examples(self):
        doc_source_directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
89
1
"""Prefix-sum table supporting O(1) range-sum queries and subarray-sum lookup."""


class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        """Precompute cumulative sums of ``array`` in O(n)."""
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end] inclusive in O(1)."""
        if start == 0:
            return self.prefix_sum[end]

        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True when some contiguous subarray sums to ``target_sum``.

        Uses the classic trick: a subarray (i..j] sums to t iff
        prefix[j] - prefix[i] == t, i.e. prefix[j] - t was seen before.
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(sum_item)

        return False


# Backward-compatible alias for the original (placeholder) class name.
__magic_name__ = PrefixSum


if __name__ == "__main__":
    import doctest

    doctest.testmod()
89
"""AutoImageProcessor: resolve and instantiate the right image processor for a model."""

import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)


logger = logging.get_logger(__name__)

# model_type -> image processor class name (duplicate 'mobilevit' entry removed).
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ('align', 'EfficientNetImageProcessor'),
        ('beit', 'BeitImageProcessor'),
        ('bit', 'BitImageProcessor'),
        ('blip', 'BlipImageProcessor'),
        ('blip-2', 'BlipImageProcessor'),
        ('bridgetower', 'BridgeTowerImageProcessor'),
        ('chinese_clip', 'ChineseCLIPImageProcessor'),
        ('clip', 'CLIPImageProcessor'),
        ('clipseg', 'ViTImageProcessor'),
        ('conditional_detr', 'ConditionalDetrImageProcessor'),
        ('convnext', 'ConvNextImageProcessor'),
        ('convnextv2', 'ConvNextImageProcessor'),
        ('cvt', 'ConvNextImageProcessor'),
        ('data2vec-vision', 'BeitImageProcessor'),
        ('deformable_detr', 'DeformableDetrImageProcessor'),
        ('deit', 'DeiTImageProcessor'),
        ('deta', 'DetaImageProcessor'),
        ('detr', 'DetrImageProcessor'),
        ('dinat', 'ViTImageProcessor'),
        ('donut-swin', 'DonutImageProcessor'),
        ('dpt', 'DPTImageProcessor'),
        ('efficientformer', 'EfficientFormerImageProcessor'),
        ('efficientnet', 'EfficientNetImageProcessor'),
        ('flava', 'FlavaImageProcessor'),
        ('focalnet', 'BitImageProcessor'),
        ('git', 'CLIPImageProcessor'),
        ('glpn', 'GLPNImageProcessor'),
        ('groupvit', 'CLIPImageProcessor'),
        ('imagegpt', 'ImageGPTImageProcessor'),
        ('instructblip', 'BlipImageProcessor'),
        ('layoutlmv2', 'LayoutLMv2ImageProcessor'),
        ('layoutlmv3', 'LayoutLMv3ImageProcessor'),
        ('levit', 'LevitImageProcessor'),
        ('mask2former', 'Mask2FormerImageProcessor'),
        ('maskformer', 'MaskFormerImageProcessor'),
        ('mgp-str', 'ViTImageProcessor'),
        ('mobilenet_v1', 'MobileNetV1ImageProcessor'),
        ('mobilenet_v2', 'MobileNetV2ImageProcessor'),
        ('mobilevit', 'MobileViTImageProcessor'),
        ('mobilevitv2', 'MobileViTImageProcessor'),
        ('nat', 'ViTImageProcessor'),
        ('oneformer', 'OneFormerImageProcessor'),
        ('owlvit', 'OwlViTImageProcessor'),
        ('perceiver', 'PerceiverImageProcessor'),
        ('pix2struct', 'Pix2StructImageProcessor'),
        ('poolformer', 'PoolFormerImageProcessor'),
        ('regnet', 'ConvNextImageProcessor'),
        ('resnet', 'ConvNextImageProcessor'),
        ('sam', 'SamImageProcessor'),
        ('segformer', 'SegformerImageProcessor'),
        ('swiftformer', 'ViTImageProcessor'),
        ('swin', 'ViTImageProcessor'),
        ('swin2sr', 'Swin2SRImageProcessor'),
        ('swinv2', 'ViTImageProcessor'),
        ('table-transformer', 'DetrImageProcessor'),
        ('timesformer', 'VideoMAEImageProcessor'),
        ('tvlt', 'TvltImageProcessor'),
        ('upernet', 'SegformerImageProcessor'),
        ('van', 'ConvNextImageProcessor'),
        ('videomae', 'VideoMAEImageProcessor'),
        ('vilt', 'ViltImageProcessor'),
        ('vit', 'ViTImageProcessor'),
        ('vit_hybrid', 'ViTHybridImageProcessor'),
        ('vit_mae', 'ViTImageProcessor'),
        ('vit_msn', 'ViTImageProcessor'),
        ('xclip', 'CLIPImageProcessor'),
        ('yolos', 'YolosImageProcessor'),
    ]
)

IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)


def image_processor_class_from_name(class_name: str):
    """Resolve an image processor class from its name, searching the model modules,
    any dynamically registered classes, and finally the main transformers init."""
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(F""".{module_name}""", 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the image processor configuration dict from a local path or the Hub.

    Returns an empty dict when no image processor config file can be located,
    letting the caller fall back to the model config.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.'
        )
        return {}

    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)


class AutoImageProcessor:
    """Factory class that instantiates the correct image processor via
    :meth:`from_pretrained`; it cannot be instantiated directly."""

    def __init__(self):
        raise EnvironmentError(
            'AutoImageProcessor is designed to be instantiated '
            'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.'
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the image processor class matching the checkpoint.

        Resolution order: image processor config's `image_processor_type` /
        `auto_map`, then a legacy feature extractor config, then the model
        config, then the static IMAGE_PROCESSOR_MAPPING.
        """
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get('image_processor_type', None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('auto_map', {}):
            image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('feature_extractor_type', None)
            if feature_extractor_class is not None:
                logger.warning(
                    'Could not find image processor class in the image processor config or the model config. Loading'
                    ' based on pattern matching with the model\'s feature extractor configuration.'
                )
                image_processor_class = feature_extractor_class.replace('FeatureExtractor', 'ImageProcessor')
            if "AutoFeatureExtractor" in config_dict.get('auto_map', {}):
                feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
                image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor', 'ImageProcessor')
                logger.warning(
                    'Could not find image processor auto map in the image processor config or the model config.'
                    ' Loading based on pattern matching with the model\'s feature extractor configuration.'
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, 'image_processor_type', None)
            if hasattr(config, 'auto_map') and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['AutoImageProcessor']

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
            F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
            F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}"""
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new (config class, image processor class) pair for this factory."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
89
1
"""Convert a fairseq UniSpeech checkpoint into the 🤗 Transformers format."""

import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name fragment -> transformers module path ('*' is the layer index).
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'ctc_proj',
    'mask_emb': 'masked_spec_embed',
}
# Keys that live at the top level of the HF model (no 'unispeech.' prefix).
TOP_LEVEL_KEYS = [
    'ctc_proj',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]


def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    """Walk ``key`` dot-by-dot into ``hf_pointer`` and copy ``value`` into the
    matching parameter, checking shapes along the way."""
    for attribute in key.split('.'):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every fairseq parameter into ``hf_model``, logging any leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one convolutional feature-extractor parameter, dispatching on the
    fairseq layer/type indices embedded in ``full_name``."""
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq UniSpeech checkpoint into our model design and
    save it (plus a processor, when fine-tuned) to ``pytorch_dump_folder_path``.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='|',
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1]), 'w2v_path': checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    args = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
89
"""Image feature for 🤗 Datasets.

Images are stored in Arrow as a struct ``{"bytes": binary, "path": string}``
and decoded back to ``PIL.Image.Image``. This module also provides the helpers
that encode paths / bytes / numpy arrays / PIL images into that struct.

Fix: the obfuscated source bound every module constant, helper function and
method to a single shared name, so later definitions shadowed earlier ones and
every call site (which uses the canonical names) raised NameError. Names are
restored to match the call sites.
"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType


# Lazily populated by list_image_compression_formats().
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126
# minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]


@dataclass
class Image:
    """Image feature to read image data from an image file.

    Accepts as input: a `str` path, a `dict` with "path" and/or "bytes" keys,
    an `np.ndarray`, or a `PIL.Image.Image`.

    Args:
        decode: Whether to decode the image data. If `False`, decoding returns
            the underlying ``{"bytes", "path"}`` dictionary instead of a PIL image.
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode `value` into a ``{"bytes", "path"}`` dict for Arrow storage."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode a ``{"bytes", "path"}`` dict into a `PIL.Image.Image`.

        Args:
            value: dict with keys "path" and "bytes".
            token_per_repo_id: optional mapping repo_id -> auth token, used when
                "path" points to a remote Hub dataset file.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    # remote path: resolve the repo auth token from the URL, then stream the bytes
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If decoding is disabled, flatten to the raw ``{"bytes", "path"}`` features."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array of paths / bytes / structs / nested lists to the
        ``{"bytes", "path"}`` struct type of this feature."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # nested lists are treated as image arrays and encoded to bytes
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow data by reading "path" entries into "bytes"."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)


def list_image_compression_formats() -> List[str]:
    """Return (and cache in `_IMAGE_COMPRESSION_FORMATS`) the Pillow formats that
    support both open and save."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL image to bytes, keeping its format when it is a known
    compression format and falling back to PNG/TIFF otherwise."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encode a PIL image as ``{"bytes", "path"}``; keeps the path when the image
    was opened from a file (avoids duplicating local data)."""
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    """Encode a numpy array as image bytes, downcasting to a Pillow-compatible
    dtype when needed."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:
        # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a batch of image objects into ``{"bytes", "path"}`` dicts,
    preserving None entries; returns `objs` unchanged for unsupported types."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
89
1
'''simple docstring''' def __lowerCamelCase ( lowerCAmelCase_ ) -> str: _a : Optional[Any] = '' for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def __lowerCamelCase ( lowerCAmelCase_ ) -> dict[str, str]: _a : List[str] = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key _a : int = remove_duplicates(key.upper() ) _a : str = len(lowerCAmelCase_ ) # First fill cipher with key characters _a : Dict = {alphabet[i]: char for i, char in enumerate(lowerCAmelCase_ )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(lowerCAmelCase_ ) , 26 ): _a : Optional[int] = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 _a : Tuple = alphabet[i - offset] _a : Optional[Any] = char return cipher_alphabet def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str: return "".join(cipher_map.get(lowerCAmelCase_ , lowerCAmelCase_ ) for ch in message.upper() ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str: _a : Dict = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(lowerCAmelCase_ , lowerCAmelCase_ ) for ch in message.upper() ) def __lowerCamelCase ( ) -> None: _a : Optional[int] = input('Enter message to encode or decode: ' ).strip() _a : Dict = input('Enter keyword: ' ).strip() _a : Optional[int] = input('Encipher or decipher? E/D:' ).strip()[0].lower() try: _a : List[str] = {'e': encipher, 'd': decipher}[option] except KeyError: raise KeyError('invalid input option' ) _a : Dict = create_cipher_map(lowerCAmelCase_ ) print(func(lowerCAmelCase_ , lowerCAmelCase_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
89
"""Quine-McCluskey minimization of boolean functions.

Given a number of variables and a list of minterms, computes the prime
implicants and then the essential prime implicants.

Fixes: all functions were collapsed onto one obfuscated name (shadowing each
other, NameError at every call site); in ``compare_string``/``is_for_table``
both operand lists were collapsed into a single variable so ``lista[i] !=
lista[i]`` was always False; the "_" merge assignment was lost; ``main`` used
an undefined name instead of the comprehension variable.
"""
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two equal-length terms differing in at most one position.

    Returns the merged term with "_" at the differing position, or False when
    the terms differ in more than one position.

    >>> compare_string('0010', '0110')
    '0_10'
    >>> compare_string('0110', '1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly combine terms until no combination is possible and return the
    surviving (prime) implicants.

    NOTE(review): marking pairs with "*"/"X" under ``k is False`` looks inverted
    relative to the textbook algorithm — preserved as-is from the original;
    confirm against a reference implementation before relying on multi-term input.

    >>> check(['0.00.01.5'])
    ['0.00.01.5']
    """
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to a bit string of length `no_of_variable`.

    >>> decimal_to_binary(3, [1.5])
    ['0.00.01.5']
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if the two terms differ in exactly `count` positions.

    >>> is_for_table('__1', '011', 2)
    True
    >>> is_for_table('01_', '001', 1)
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart.

    Mutates `chart` in place while selecting.

    >>> selection([[1]], ['0.00.01.5'])
    ['0.00.01.5']
    """
    temp = []
    select = [0] * len(chart)
    # A column covered by exactly one implicant makes that implicant essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Take the essential implicants and clear every column they cover.
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily pick the implicant covering the most remaining columns.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 when implicant i covers minterm j.

    >>> prime_implicant_chart(['0.00.01.5'], ['0.00.01.5'])
    [[1]]
    """
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    """Read the variable count and minterms from stdin and print the results."""
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
89
1
"""Fast and slow tests for `StableDiffusionLatentUpscalePipeline`.

Fix: both test classes were named `__magic_name__` (the second shadowed the
first) and every test method was named `__lowercase` (only the last per class
survived), silently dropping the whole suite; several defs also repeated the
obfuscated parameter name, which is a SyntaxError. Method names are restored
from the `super().<name>(...)` calls they override and the diffusers API.
"""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


def check_same_shape(tensor_list) -> bool:
    """Return True if every tensor in `tensor_list` has the same shape."""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])


class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    # NOTE(review): attribute name was obfuscated; `test_cpu_offload` matches the
    # True default used by this suite — confirm against diffusers test mixins.
    test_cpu_offload = True

    @property
    def dummy_image(self):
        # Deterministic 1x4x16x16 latent-space input (fixed RNG seed).
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def get_dummy_components(self):
        """Build tiny pipeline components so the fast tests run on CPU."""
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=False,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)


@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        low_res_img = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=low_res_img,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
89
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy-import structure for the CPM-Ant model.

Fix: the import structure dict and the torch-gated symbol list were bound to
the single obfuscated name ``__lowerCAmelCase``, so ``_import_structure`` was
undefined where referenced and the ``_LazyModule`` was built but never
installed into ``sys.modules``. Canonical wiring restored.
"""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling symbols are only exposed when torch is installed.
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
1
"""Fast (Rust-backed) tokenization class for ConvBERT.

Fix: module constants were all bound to ``__lowerCAmelCase`` while the class
references them by their canonical names; the three public methods collided on
the name ``__lowercase``; and several defs repeated the obfuscated parameter
name ``_UpperCAmelCase``, which is a SyntaxError. Canonical names restored.
"""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" ConvBERT tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved options disagree with the
        # options requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs with special tokens: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create token-type ids: 0s for the first sequence (and its specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the tokenizer vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
89
"""Tokenization tests for LayoutLM.

Fix: the four test-class methods after ``setUp`` all collided on the obfuscated
name ``__lowercase``, so only the final (empty) one survived and the real tests
never ran. Distinct names restored (derived from the bodies and the
TokenizerTesterMixin contract).
"""
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # Minimal WordPiece vocab written to a temp dir for the slow tokenizer.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        # NOTE(review): original method name was obfuscated; the body is an
        # intentional no-op placeholder — confirm the canonical name upstream.
        pass
89
1
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __magic_name__ : def __init__( self : List[str] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=13 ,_UpperCAmelCase : Any=32 ,_UpperCAmelCase : Union[str, Any]=3 ,_UpperCAmelCase : Optional[int]=4 ,_UpperCAmelCase : Optional[Any]=[10, 20, 30, 40] ,_UpperCAmelCase : Tuple=[2, 2, 3, 2] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=37 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Optional[Any]=10 ,_UpperCAmelCase : Tuple=0.02 ,_UpperCAmelCase : Any=["stage2", "stage3", "stage4"] ,_UpperCAmelCase : Any=[2, 3, 4] ,_UpperCAmelCase : Tuple=None ,): _a : Optional[Any] = parent _a : List[Any] = batch_size _a : str = image_size _a : Union[str, Any] = num_channels _a : List[Any] = num_stages _a : Dict = hidden_sizes _a : int = depths _a : Tuple = is_training _a : List[str] = use_labels _a : Dict = intermediate_size _a : int = hidden_act _a : int = num_labels _a : Any = initializer_range _a : Tuple = out_features _a : int = out_indices _a : List[Any] = scope 
def __lowercase ( self : Dict ): _a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a : Union[str, Any] = None if self.use_labels: _a : Tuple = ids_tensor([self.batch_size] ,self.num_labels ) _a : str = self.get_config() return config, pixel_values, labels def __lowercase ( self : Any ): return ConvNextVaConfig( num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,) def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ): _a : Optional[Any] = ConvNextVaModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : Any = model(_UpperCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def __lowercase ( self : Tuple ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ): _a : List[Any] = ConvNextVaForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : List[str] = model(_UpperCAmelCase ,labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def __lowercase ( self : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ): _a : Optional[int] = ConvNextVaBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : Dict = model(_UpperCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels 
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] ) # verify backbone works with out_features=None _a : Tuple = None _a : List[Any] = ConvNextVaBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : List[str] = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def __lowercase ( self : Optional[Any] ): _a : Any = self.prepare_config_and_inputs() _a , _a , _a : Union[str, Any] = config_and_inputs _a : Any = {'pixel_values': pixel_values} return config, inputs_dict def __lowercase ( self : str ): _a : Tuple = self.prepare_config_and_inputs() _a , _a , _a : Tuple = config_and_inputs _a : List[Any] = {'pixel_values': pixel_values, 'labels': labels} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): lowerCAmelCase : str = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowerCAmelCase : str = ( {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification} if is_torch_available() else {} ) lowerCAmelCase : int = False lowerCAmelCase : str = False lowerCAmelCase : Optional[Any] = False lowerCAmelCase : List[str] = False lowerCAmelCase : Optional[int] = False def __lowercase ( self : List[Any] ): _a : str = ConvNextVaModelTester(self ) _a : Tuple = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 ) def __lowercase ( self : Optional[Any] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() 
self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __lowercase ( self : str ): return @unittest.skip(reason='ConvNextV2 does not use inputs_embeds' ) def __lowercase ( self : List[Any] ): pass @unittest.skip(reason='ConvNextV2 does not support input and output embeddings' ) def __lowercase ( self : Optional[int] ): pass @unittest.skip(reason='ConvNextV2 does not use feedforward chunking' ) def __lowercase ( self : Any ): pass def __lowercase ( self : List[str] ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: _a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels() _a : Any = True if model_class.__name__ in [ *get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase ), ]: continue _a : Optional[Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.train() _a : str = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase ) _a : Optional[int] = model(**_UpperCAmelCase ).loss loss.backward() def __lowercase ( self : str ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: _a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels() _a : Optional[int] = False _a : Tuple = True if ( model_class.__name__ in [*get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase )] or not model_class.supports_gradient_checkpointing ): continue _a : Tuple = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.gradient_checkpointing_enable() model.train() _a : Any = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase ) _a : List[Any] = model(**_UpperCAmelCase ).loss loss.backward() def __lowercase ( self : List[Any] ): _a , 
_a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : int = model_class(_UpperCAmelCase ) _a : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a : Dict = [*signature.parameters.keys()] _a : int = ['pixel_values'] self.assertListEqual(arg_names[:1] ,_UpperCAmelCase ) def __lowercase ( self : int ): _a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def __lowercase ( self : Any ): def check_hidden_states_output(_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Dict ): _a : Union[str, Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): _a : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) ) _a : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _a : str = self.model_tester.num_stages self.assertEqual(len(_UpperCAmelCase ) ,expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) _a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : int = True check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _a : Optional[Any] = True check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) def __lowercase ( self : List[Any] ): _a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def __lowercase ( self : int ): for model_name in 
CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a : Any = ConvNextVaModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def __lowerCamelCase ( ) -> List[Any]: _a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): @cached_property def __lowercase ( self : Optional[Any] ): return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None @slow def __lowercase ( self : Any ): _a : List[str] = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(_UpperCAmelCase ) _a : Optional[int] = self.default_image_processor _a : str = prepare_img() _a : str = preprocessor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): _a : Dict = model(**_UpperCAmelCase ) # verify the logits _a : Optional[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,_UpperCAmelCase ) _a : Optional[Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_UpperCAmelCase ,atol=1E-4 ) )
89
'''simple docstring''' import copy from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { '''microsoft/conditional-detr-resnet-50''': ( '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json''' ), } class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : Any = 'conditional_detr' lowerCAmelCase : List[str] = ['past_key_values'] lowerCAmelCase : Optional[int] = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self : Optional[int] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[Any]=3 ,_UpperCAmelCase : List[Any]=300 ,_UpperCAmelCase : Dict=6 ,_UpperCAmelCase : List[str]=2048 ,_UpperCAmelCase : Optional[int]=8 ,_UpperCAmelCase : List[Any]=6 ,_UpperCAmelCase : Optional[int]=2048 ,_UpperCAmelCase : Dict=8 ,_UpperCAmelCase : int=0.0 ,_UpperCAmelCase : Optional[Any]=0.0 ,_UpperCAmelCase : Optional[Any]=True ,_UpperCAmelCase : str="relu" ,_UpperCAmelCase : Tuple=256 ,_UpperCAmelCase : Optional[int]=0.1 ,_UpperCAmelCase : str=0.0 ,_UpperCAmelCase : Optional[int]=0.0 ,_UpperCAmelCase : Union[str, Any]=0.02 ,_UpperCAmelCase : List[str]=1.0 ,_UpperCAmelCase : Any=False ,_UpperCAmelCase : int="sine" ,_UpperCAmelCase : List[str]="resnet50" ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : str=False ,_UpperCAmelCase : str=2 ,_UpperCAmelCase : int=5 ,_UpperCAmelCase : Optional[int]=2 ,_UpperCAmelCase : str=1 ,_UpperCAmelCase : Union[str, Any]=1 ,_UpperCAmelCase : List[str]=2 ,_UpperCAmelCase : Union[str, Any]=5 ,_UpperCAmelCase : List[Any]=2 ,_UpperCAmelCase : Optional[int]=0.25 ,**_UpperCAmelCase : Tuple ,): if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t 
specify both `backbone_config` and `use_timm_backbone`.' ) if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' ) _a : Optional[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'] ) elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ): _a : str = backbone_config.get('model_type' ) _a : Union[str, Any] = CONFIG_MAPPING[backbone_model_type] _a : List[Any] = config_class.from_dict(_UpperCAmelCase ) _a : Tuple = use_timm_backbone _a : Union[str, Any] = backbone_config _a : List[Any] = num_channels _a : Union[str, Any] = num_queries _a : Optional[Any] = d_model _a : Tuple = encoder_ffn_dim _a : Dict = encoder_layers _a : List[str] = encoder_attention_heads _a : Union[str, Any] = decoder_ffn_dim _a : Optional[int] = decoder_layers _a : int = decoder_attention_heads _a : Optional[int] = dropout _a : Tuple = attention_dropout _a : List[Any] = activation_dropout _a : str = activation_function _a : Optional[Any] = init_std _a : Union[str, Any] = init_xavier_std _a : List[Any] = encoder_layerdrop _a : List[Any] = decoder_layerdrop _a : Dict = encoder_layers _a : List[Any] = auxiliary_loss _a : Optional[int] = position_embedding_type _a : List[Any] = backbone _a : Optional[int] = use_pretrained_backbone _a : Optional[int] = dilation # Hungarian matcher _a : Tuple = class_cost _a : str = bbox_cost _a : Any = giou_cost # Loss coefficients _a : Tuple = mask_loss_coefficient _a : Dict = dice_loss_coefficient _a : Tuple = cls_loss_coefficient _a : Any = bbox_loss_coefficient _a : Dict = giou_loss_coefficient _a : Union[str, Any] = focal_alpha super().__init__(is_encoder_decoder=_UpperCAmelCase ,**_UpperCAmelCase ) @property def __lowercase ( self : Dict ): return self.encoder_attention_heads @property def __lowercase ( self : str ): return self.d_model def __lowercase ( self : int ): _a : List[str] = copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: _a : Dict = 
self.backbone_config.to_dict() _a : Union[str, Any] = self.__class__.model_type return output class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : str = version.parse('1.11' ) @property def __lowercase ( self : Dict ): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'}), ] ) @property def __lowercase ( self : Any ): return 1E-5 @property def __lowercase ( self : List[Any] ): return 12
89
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger(__name__) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]: _a : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('cls_token', 'vit.embeddings.cls_token'), ('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'), ('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'), ('pos_embed', 
'vit.embeddings.position_embeddings'), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ('pre_logits.fc.weight', 'pooler.dense.weight'), ('pre_logits.fc.bias', 'pooler.dense.bias'), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _a : Dict = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('norm.weight', 'vit.layernorm.weight'), ('norm.bias', 'vit.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> Dict: for i in range(config.num_hidden_layers ): if base_model: _a : Union[str, Any] = '' else: _a : Optional[Any] = 'vit.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _a : Dict = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) _a : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict _a : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] _a : Union[str, Any] = in_proj_bias[: config.hidden_size] _a : Union[str, Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _a : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _a : Dict = in_proj_weight[ -config.hidden_size :, : ] _a : Optional[Any] = in_proj_bias[-config.hidden_size :] def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[Any]: _a : int = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]: _a : Tuple = dct.pop(lowerCAmelCase_ ) _a : Optional[Any] = val def __lowerCamelCase ( ) -> Any: _a : Tuple = 
'http://images.cocodataset.org/val2017/000000039769.jpg' _a : List[Any] = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ) return im @torch.no_grad() def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: _a : Any = ViTConfig() _a : List[Any] = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": _a : List[Any] = True _a : str = int(vit_name[-12:-10] ) _a : Any = int(vit_name[-9:-6] ) else: _a : str = 1000 _a : List[Any] = 'huggingface/label-files' _a : int = 'imagenet-1k-id2label.json' _a : Any = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='dataset' ) , 'r' ) ) _a : Optional[int] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} _a : str = idalabel _a : int = {v: k for k, v in idalabel.items()} _a : List[Any] = int(vit_name[-6:-4] ) _a : Optional[int] = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('tiny' ): _a : Optional[int] = 192 _a : Dict = 768 _a : List[Any] = 12 _a : Union[str, Any] = 3 elif vit_name[9:].startswith('small' ): _a : Optional[Any] = 384 _a : str = 1536 _a : str = 12 _a : Union[str, Any] = 6 else: pass else: if vit_name[4:].startswith('small' ): _a : int = 768 _a : str = 2304 _a : List[str] = 8 _a : Optional[int] = 8 elif vit_name[4:].startswith('base' ): pass elif vit_name[4:].startswith('large' ): _a : Any = 1024 _a : Optional[int] = 4096 _a : Union[str, Any] = 24 _a : Any = 16 elif vit_name[4:].startswith('huge' ): _a : str = 1280 _a : Dict = 5120 _a : str = 32 _a : str = 16 # load original model from timm _a : Union[str, Any] = timm.create_model(lowerCAmelCase_ , pretrained=lowerCAmelCase_ ) timm_model.eval() # load state_dict of original model, remove and rename some keys _a : str = timm_model.state_dict() if base_model: remove_classification_head_(lowerCAmelCase_ ) _a : Optional[Any] = create_rename_keys(lowerCAmelCase_ , lowerCAmelCase_ ) 
for src, dest in rename_keys: rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # load HuggingFace model if vit_name[-5:] == "in21k": _a : Union[str, Any] = ViTModel(lowerCAmelCase_ ).eval() else: _a : str = ViTForImageClassification(lowerCAmelCase_ ).eval() model.load_state_dict(lowerCAmelCase_ ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: _a : Any = DeiTImageProcessor(size=config.image_size ) else: _a : Union[str, Any] = ViTImageProcessor(size=config.image_size ) _a : List[str] = image_processor(images=prepare_img() , return_tensors='pt' ) _a : str = encoding['pixel_values'] _a : List[str] = model(lowerCAmelCase_ ) if base_model: _a : Optional[Any] = timm_model.forward_features(lowerCAmelCase_ ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(lowerCAmelCase_ , outputs.pooler_output , atol=1E-3 ) else: _a : Union[str, Any] = timm_model(lowerCAmelCase_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowerCAmelCase_ , outputs.logits , atol=1E-3 ) Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCAmelCase_ ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--vit_name''', default='''vit_base_patch16_224''', type=str, help='''Name of the ViT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __lowerCAmelCase = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
89
"""Testing suite for the PyTorch ConvNextV2 model."""

import inspect
import unittest

from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
    from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextVaModelTester:
    """Builds tiny configs and random inputs for the ConvNextV2 model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when use_labels is False."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for the ConvNextV2 model family."""

    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # Base/backbone models have no loss head, so they cannot be trained here.
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats fixture used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
89
1
"""Greedy best-first search on a 2D grid (0 = free cell, 1 = obstacle)."""
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    """A search node holding a grid position and its Manhattan-distance heuristic."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: float,
        parent: Node | None,
    ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        # Greedy best-first ranks nodes by heuristic only.
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from this node to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost

    def __eq__(self, other) -> bool:
        # Positional equality so membership tests in open/closed lists work.
        return self.pos == other.pos


class GreedyBestFirst:
    """Greedy best-first search from `start` to `goal`, both given as (y, x)."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        """Expand the lowest-heuristic node until the goal is reached.

        Returns the path start -> goal, or [start] if the goal is unreachable.
        """
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, non-obstacle neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Follow parent links back to the start and return the path start-first."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
89
"""Lazy import structure for the LiLT model (configuration + PyTorch modeling)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule -> public names; consumed by _LazyModule below.
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is missing: simply don't register the modeling symbols.
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
1
"""Image processor: resize / center-crop / rescale / normalize pipeline (ImageNet defaults)."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_batched,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# NOTE(review): the original class name was lost to obfuscation; this neutral name was
# chosen from the visible defaults (224x224, BICUBIC, IMAGENET_DEFAULT mean/std) — confirm
# against the surrounding package before merging.
class EfficientFormerImageProcessor(BaseImageProcessor):
    """Preprocesses images: optional resize, center crop, rescale and normalize."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size` ({"height", "width"} or {"shortest_edge"})."""
        size = get_size_dict(size)

        if "shortest_edge" in size:
            # Keep aspect ratio: scale so the shortest edge matches.
            output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to size["height"] x size["width"]."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the configured pipeline over one image or a batch; per-call args override defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
89
"""Project Euler-style problem: find the first perfect-partition candidate for which the
proportion of perfect partitions drops below a given threshold."""
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if `positive_integer` equals 4**t - 2**t for some integer t.

    Such numbers satisfy sqrt(4n + 1)/2 + 1/2 == 2**t, so the log2 of that
    expression is a whole number exactly for the "perfect" candidates.
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)

    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the smallest candidate m(m+1) at which the ratio of perfect
    partitions to total partitions first drops below `max_proportion`."""
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
89
1
"""Testing suite for the PyTorch ViTMAE model."""

import inspect
import math
import tempfile
import unittest

import numpy as np

from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMAEForPreTraining, ViTMAEModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMAEModelTester:
    """Builds tiny configs and random inputs for the ViTMAE model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline tests for ViTMAE; several common tests are skipped because
    the model applies a fresh random mask on every forward pass."""

    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    # overwrite from common since ViTMAEForPretraining applies random masking: fix the
    # noise so PT and TF models see identical masks
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
    to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats fixture used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
89
'''simple docstring''' import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=1 ) -> Dict: if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Tuple: _a : Any = [] for old_item in old_list: _a : Union[str, Any] = old_item.replace('in_layers.0' , 'norm1' ) _a : Optional[int] = new_item.replace('in_layers.2' , 'conv1' ) _a : str = new_item.replace('out_layers.0' , 'norm2' ) _a : List[str] = new_item.replace('out_layers.3' , 'conv2' ) _a : str = new_item.replace('emb_layers.1' , 'time_emb_proj' ) _a : Tuple = new_item.replace('skip_connection' , 'conv_shortcut' ) _a : Any = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Any: _a : List[str] = [] for old_item in old_list: _a : List[Any] = old_item _a : Optional[int] = new_item.replace('norm.weight' , 'group_norm.weight' ) _a : Optional[Any] = new_item.replace('norm.bias' , 'group_norm.bias' ) _a : Any = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) _a : Optional[Any] = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) _a : Optional[int] = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Any: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _a : Optional[Any] = old_checkpoint[path] _a : Optional[Any] = old_tensor.shape[0] // 3 _a : Any = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _a : int = old_tensor.shape[0] // config['num_head_channels'] // 3 _a : str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _a , _a , _a : Tuple = old_tensor.split(channels // num_heads , dim=1 ) _a : Dict = query.reshape(lowerCAmelCase_ ) _a : str = key.reshape(lowerCAmelCase_ ) _a : Optional[int] = value.reshape(lowerCAmelCase_ ) for path in paths: _a : Dict = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _a : Any = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) _a : str = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) _a : Union[str, Any] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: _a : int = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _a : List[str] = old_checkpoint[path['old']][:, :, 0] else: _a : Dict = old_checkpoint[path['old']] def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _a : Optional[int] = {} _a : Dict = checkpoint['time_embed.0.weight'] _a : Tuple = checkpoint['time_embed.0.bias'] _a : Union[str, Any] = checkpoint['time_embed.2.weight'] _a : List[str] = checkpoint['time_embed.2.bias'] _a : List[str] = checkpoint['input_blocks.0.0.weight'] _a : Union[str, Any] = checkpoint['input_blocks.0.0.bias'] _a : Optional[int] = checkpoint['out.0.weight'] _a : int = checkpoint['out.0.bias'] _a : List[str] = checkpoint['out.2.weight'] _a : Optional[int] = checkpoint['out.2.bias'] # Retrieves the 
keys for the input blocks only _a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) _a : Dict = { layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } # Retrieves the keys for the middle blocks only _a : List[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) _a : Union[str, Any] = { layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } # Retrieves the keys for the output blocks only _a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) _a : str = { layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } for i in range(1 , lowerCAmelCase_ ): _a : List[Any] = (i - 1) // (config['num_res_blocks'] + 1) _a : Optional[int] = (i - 1) % (config['num_res_blocks'] + 1) _a : Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key] _a : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key] if f"""input_blocks.{i}.0.op.weight""" in checkpoint: _a : List[Any] = checkpoint[ f"""input_blocks.{i}.0.op.weight""" ] _a : Union[str, Any] = checkpoint[ f"""input_blocks.{i}.0.op.bias""" ] continue _a : Any = renew_resnet_paths(lowerCAmelCase_ ) _a : List[str] = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""} _a : Optional[Any] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path, resnet_op] , config=lowerCAmelCase_ ) if len(lowerCAmelCase_ ): _a : List[str] = renew_attention_paths(lowerCAmelCase_ ) _a : List[Any] = { 'old': f"""input_blocks.{i}.1""", 'new': 
f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""", } _a : Optional[Any] = { f"""input_blocks.{i}.1.qkv.bias""": { 'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", 'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", 'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""input_blocks.{i}.1.qkv.weight""": { 'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", 'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", 'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ , ) _a : str = middle_blocks[0] _a : Tuple = middle_blocks[1] _a : Any = middle_blocks[2] _a : List[Any] = renew_resnet_paths(lowerCAmelCase_ ) assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ ) _a : Any = renew_resnet_paths(lowerCAmelCase_ ) assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ ) _a : int = renew_attention_paths(lowerCAmelCase_ ) _a : int = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ ) for i in range(lowerCAmelCase_ ): _a : List[str] = i // (config['num_res_blocks'] + 1) _a : Any = i % (config['num_res_blocks'] + 1) _a : Union[str, Any] = [shave_segments(lowerCAmelCase_ , 2 ) for name in 
output_blocks[i]] _a : Optional[Any] = {} for layer in output_block_layers: _a , _a : str = layer.split('.' )[0], shave_segments(lowerCAmelCase_ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(lowerCAmelCase_ ) else: _a : str = [layer_name] if len(lowerCAmelCase_ ) > 1: _a : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key] _a : Optional[Any] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key] _a : Dict = renew_resnet_paths(lowerCAmelCase_ ) _a : str = renew_resnet_paths(lowerCAmelCase_ ) _a : Optional[int] = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""} assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , config=lowerCAmelCase_ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _a : List[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) _a : Tuple = checkpoint[ f"""output_blocks.{i}.{index}.conv.weight""" ] _a : List[str] = checkpoint[ f"""output_blocks.{i}.{index}.conv.bias""" ] # Clear attentions as they have been attributed above. 
if len(lowerCAmelCase_ ) == 2: _a : Union[str, Any] = [] if len(lowerCAmelCase_ ): _a : Tuple = renew_attention_paths(lowerCAmelCase_ ) _a : str = { 'old': f"""output_blocks.{i}.1""", 'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""", } _a : List[Any] = { f"""output_blocks.{i}.1.qkv.bias""": { 'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", 'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", 'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""output_blocks.{i}.1.qkv.weight""": { 'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", 'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", 'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=lowerCAmelCase_ , ) else: _a : List[Any] = renew_resnet_paths(lowerCAmelCase_ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _a : int = '.'.join(['output_blocks', str(lowerCAmelCase_ ), path['old']] ) _a : Union[str, Any] = '.'.join(['up_blocks', str(lowerCAmelCase_ ), 'resnets', str(lowerCAmelCase_ ), path['new']] ) _a : Union[str, Any] = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = 
torch.load(args.checkpoint_path) with open(args.config_file) as f: __lowerCAmelCase = json.loads(f.read()) __lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __lowerCAmelCase = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
89
1
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : Union[str, Any] = ['vqvae'] def __init__( self : str ,_UpperCAmelCase : AutoencoderKL ,_UpperCAmelCase : UNetaDConditionModel ,_UpperCAmelCase : Mel ,_UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] ,): super().__init__() self.register_modules(unet=_UpperCAmelCase ,scheduler=_UpperCAmelCase ,mel=_UpperCAmelCase ,vqvae=_UpperCAmelCase ) def __lowercase ( self : Tuple ): return 50 if isinstance(self.scheduler ,_UpperCAmelCase ) else 1000 @torch.no_grad() def __call__( self : List[str] ,_UpperCAmelCase : int = 1 ,_UpperCAmelCase : str = None ,_UpperCAmelCase : np.ndarray = None ,_UpperCAmelCase : int = 0 ,_UpperCAmelCase : int = 0 ,_UpperCAmelCase : int = None ,_UpperCAmelCase : torch.Generator = None ,_UpperCAmelCase : float = 0 ,_UpperCAmelCase : float = 0 ,_UpperCAmelCase : torch.Generator = None ,_UpperCAmelCase : float = 0 ,_UpperCAmelCase : torch.Tensor = None ,_UpperCAmelCase : torch.Tensor = None ,_UpperCAmelCase : str=True ,): _a : List[str] = steps or self.get_default_steps() self.scheduler.set_timesteps(_UpperCAmelCase ) _a : Any = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: _a : Optional[Any] = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: _a : int = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) ,generator=_UpperCAmelCase ,device=self.device ,) _a : Optional[int] = noise _a : Optional[int] = None if audio_file is not 
None or raw_audio is not None: self.mel.load_audio(_UpperCAmelCase ,_UpperCAmelCase ) _a : int = self.mel.audio_slice_to_image(_UpperCAmelCase ) _a : str = np.frombuffer(input_image.tobytes() ,dtype='uint8' ).reshape( (input_image.height, input_image.width) ) _a : Union[str, Any] = (input_image / 255) * 2 - 1 _a : Optional[int] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device ) if self.vqvae is not None: _a : Dict = self.vqvae.encode(torch.unsqueeze(_UpperCAmelCase ,0 ) ).latent_dist.sample( generator=_UpperCAmelCase )[0] _a : Dict = self.vqvae.config.scaling_factor * input_images if start_step > 0: _a : str = self.scheduler.add_noise(_UpperCAmelCase ,_UpperCAmelCase ,self.scheduler.timesteps[start_step - 1] ) _a : List[str] = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) _a : List[Any] = int(mask_start_secs * pixels_per_second ) _a : str = int(mask_end_secs * pixels_per_second ) _a : str = self.scheduler.add_noise(_UpperCAmelCase ,_UpperCAmelCase ,torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet ,_UpperCAmelCase ): _a : int = self.unet(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )['sample'] else: _a : Union[str, Any] = self.unet(_UpperCAmelCase ,_UpperCAmelCase )['sample'] if isinstance(self.scheduler ,_UpperCAmelCase ): _a : int = self.scheduler.step( model_output=_UpperCAmelCase ,timestep=_UpperCAmelCase ,sample=_UpperCAmelCase ,eta=_UpperCAmelCase ,generator=_UpperCAmelCase ,)['prev_sample'] else: _a : int = self.scheduler.step( model_output=_UpperCAmelCase ,timestep=_UpperCAmelCase ,sample=_UpperCAmelCase ,generator=_UpperCAmelCase ,)['prev_sample'] if mask is not None: if mask_start > 0: _a : Tuple = mask[:, step, :, :mask_start] if mask_end > 0: _a : List[Any] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used 
in training to ensure unit variance _a : int = 1 / self.vqvae.config.scaling_factor * images _a : Tuple = self.vqvae.decode(_UpperCAmelCase )['sample'] _a : Union[str, Any] = (images / 2 + 0.5).clamp(0 ,1 ) _a : Dict = images.cpu().permute(0 ,2 ,3 ,1 ).numpy() _a : Optional[Any] = (images * 255).round().astype('uint8' ) _a : List[Any] = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_UpperCAmelCase ,mode='RGB' ).convert('L' ) for _ in images) ) _a : List[Any] = [self.mel.image_to_audio(_UpperCAmelCase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(_UpperCAmelCase )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_UpperCAmelCase ) ) @torch.no_grad() def __lowercase ( self : str ,_UpperCAmelCase : List[Image.Image] ,_UpperCAmelCase : int = 50 ): assert isinstance(self.scheduler ,_UpperCAmelCase ) self.scheduler.set_timesteps(_UpperCAmelCase ) _a : Union[str, Any] = np.array( [np.frombuffer(image.tobytes() ,dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] ) _a : Optional[Any] = (sample / 255) * 2 - 1 _a : Any = torch.Tensor(_UpperCAmelCase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ): _a : Dict = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps _a : Any = self.scheduler.alphas_cumprod[t] _a : Optional[int] = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) _a : int = 1 - alpha_prod_t _a : Union[str, Any] = self.unet(_UpperCAmelCase ,_UpperCAmelCase )['sample'] _a : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output _a : Any = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) _a : Any = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def __lowercase ( _UpperCAmelCase : torch.Tensor 
,_UpperCAmelCase : torch.Tensor ,_UpperCAmelCase : float ): _a : int = acos(torch.dot(torch.flatten(_UpperCAmelCase ) ,torch.flatten(_UpperCAmelCase ) ) / torch.norm(_UpperCAmelCase ) / torch.norm(_UpperCAmelCase ) ) return sin((1 - alpha) * theta ) * xa / sin(_UpperCAmelCase ) + sin(alpha * theta ) * xa / sin(_UpperCAmelCase )
89
"""ffmpeg-based audio decoding and microphone streaming utilities.

Reconstructed: the original collapsed all five helpers onto the colliding name
`__lowerCamelCase` and discarded every assignment target, so `ar`, `ac`,
`chunk_len`, `item[...]` etc. were never actually bound. Real names are
recovered from the in-module call sites (`ffmpeg_microphone`,
`chunk_bytes_iter`, `_ffmpeg_stream`).
"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode an in-memory audio payload to a mono float32 array via ffmpeg.

    Args:
        bpayload: raw bytes of an audio file in any container ffmpeg understands.
        sampling_rate: target sampling rate for the decoded audio.

    Raises:
        ValueError: if ffmpeg is not installed, or the payload decodes to nothing.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Yield raw audio chunks captured from the default system microphone.

    Each yielded item is a bytes chunk of ``chunk_length_s`` seconds of audio.
    The capture backend is chosen per platform (alsa / avfoundation / dshow).
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as dicts with overlapping strides.

    Yields dicts ``{"raw": np.ndarray, "stride": (left, right),
    "sampling_rate": int, "partial": bool}`` suitable for chunked ASR.
    ``stream_chunk_s`` controls how often partial chunks are emitted;
    strides default to ``chunk_length_s / 6`` on each side.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into fixed-size chunks with overlapping strides.

    Args:
        iterator: iterable of ``bytes`` of arbitrary sizes.
        chunk_len: size (in bytes) of each emitted chunk.
        stride: ``(left, right)`` overlap in bytes carried between chunks;
            must be strictly smaller than ``chunk_len`` combined.
        stream: when True, also emit incomplete chunks flagged ``"partial": True``.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0  # first chunk has no left context yet
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Yield successive ``buflen``-byte reads from an ffmpeg subprocess stdout.

    Raises:
        ValueError: if the ffmpeg binary is not installed.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
89
1
"""Swin2SR model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class __magic_name__(PretrainedConfig):
    """Configuration for a Swin2SR super-resolution model.

    Fixes over the garbled original: ``__init__`` declared twenty parameters
    all named ``_UpperCAmelCase`` (a SyntaxError), the base class
    ``_UpperCamelCase`` was undefined (``PretrainedConfig`` is the imported
    base), and every ``self.attr = value`` assignment had been destroyed into
    a dead local, so the config carried no attributes. Parameter names and
    defaults follow the upstream Swin2SR configuration.
    """

    model_type = "swin2sr"
    # Map standard transformer attribute names onto Swin2SR-specific ones.
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        """Build the configuration; unrecognized kwargs go to PretrainedConfig."""
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # One transformer stage per entry in `depths`.
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
89
"""Breadth-first search on an unweighted, directed adjacency-list graph.

Fixes over the garbled original: the demo graph was bound to
``__lowerCAmelCase`` but referenced as ``demo_graph`` (NameError); the
working-variable assignments (``path``, ``node``, ``dist[adjacent]``,
``dist[target]``) had been destroyed into dead locals; ``set(start)`` built a
set of the *characters* of ``start`` rather than ``{start}``; and
``list.pop(0)`` made each dequeue O(n) — replaced with ``deque.popleft()``.
"""
from collections import deque

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Return one shortest path from `start` to `goal`, or [] if unreachable.

    Args:
        graph: adjacency list mapping each node to its neighbours.
        start: node to search from.
        goal: node to reach.
    """
    # keep track of all visited nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = deque([[start]])

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # take the first path from the queue and look at its last node
        path = queue.popleft()
        node = path[-1]
        if node not in explored:
            # extend the path by one hop for every neighbour
            for neighbour in graph[node]:
                new_path = [*path, neighbour]
                queue.append(new_path)
                # BFS guarantees the first time we see `goal` is via a shortest path
                if neighbour == goal:
                    return new_path
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest path from `start` to `target`.

    Returns 0 when start == target, and -1 when either node is missing from
    the graph, the graph is empty, or the target is unreachable.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0

    queue = deque([start])
    visited = {start}
    # Distances from `start`; target sentinel -1 means "not reached yet".
    dist = {start: 0, target: -1}
    while queue:
        node = queue.popleft()
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
89
1
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': 
'''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } __lowerCAmelCase = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]: for attribute in key.split('.' ): _a : Optional[int] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) if weight_type is not None: _a : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape else: _a : Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": _a : Optional[Any] = value elif weight_type == "weight_g": _a : Optional[int] = value elif weight_type == "weight_v": _a : Optional[Any] = value elif weight_type == "bias": _a : List[Any] = value elif weight_type == "running_mean": _a : Optional[int] = value elif weight_type == "running_var": _a : Optional[int] = value elif weight_type == "num_batches_tracked": _a : Tuple = value elif weight_type == "inv_freq": _a : Dict = value else: _a : List[str] = value logger.info(f"""{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict: _a : Union[str, Any] = [] _a : Any = fairseq_model.state_dict() _a : Any = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): _a : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == 'group' , ) _a : int = True else: for key, mapped_key in MAPPING.items(): _a : Any = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: _a : Any = True if "*" in mapped_key: _a : Optional[Any] = name.split(lowerCAmelCase_ )[0].split('.' )[-2] _a : Optional[int] = mapped_key.replace('*' , lowerCAmelCase_ ) if "pos_bias_u" in name: _a : Any = None elif "pos_bias_v" in name: _a : Union[str, Any] = None elif "weight_g" in name: _a : Tuple = 'weight_g' elif "weight_v" in name: _a : Dict = 'weight_v' elif "bias" in name: _a : Optional[Any] = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj _a : Dict = 'weight' elif "running_mean" in name: _a : str = 'running_mean' elif "inv_freq" in name: _a : Optional[Any] = 'inv_freq' elif "running_var" in name: _a : Tuple = 'running_var' elif "num_batches_tracked" in name: _a : List[str] = 'num_batches_tracked' else: _a : Optional[Any] = None set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) continue if not is_used: unused_weights.append(lowerCAmelCase_ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: _a : Any = full_name.split('conv_layers.' )[-1] _a : str = name.split('.' 
) _a : Union[str, Any] = int(items[0] ) _a : Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) _a : Tuple = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) _a : Optional[Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) _a : List[str] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) _a : List[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase_ ) @torch.no_grad() def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True ) -> Optional[int]: if config_path is not None: _a : str = 
WavaVecaConformerConfig.from_pretrained(lowerCAmelCase_ , hidden_act='swish' ) else: _a : List[str] = WavaVecaConformerConfig() if "rope" in checkpoint_path: _a : Dict = 'rotary' if is_finetuned: if dict_path: _a : List[Any] = Dictionary.load(lowerCAmelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _a : str = target_dict.pad_index _a : int = target_dict.bos_index _a : Optional[Any] = target_dict.eos_index _a : str = len(target_dict.symbols ) _a : List[Any] = os.path.join(lowerCAmelCase_ , 'vocab.json' ) if not os.path.isdir(lowerCAmelCase_ ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCAmelCase_ ) ) return os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) _a : List[Any] = target_dict.indices # fairseq has the <pad> and <s> switched _a : Union[str, Any] = 0 _a : Any = 1 with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) _a : Union[str, Any] = WavaVecaCTCTokenizer( lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCAmelCase_ , ) _a : int = True if config.feat_extract_norm == 'layer' else False _a : Dict = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ) _a : str = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) _a : List[Any] = WavaVecaConformerForCTC(lowerCAmelCase_ ) else: _a : Optional[int] = WavaVecaConformerForPreTraining(lowerCAmelCase_ ) if is_finetuned: _a , _a , _a : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: _a : Dict = 
argparse.Namespace(task='audio_pretraining' ) _a : Any = fairseq.tasks.setup_task(lowerCAmelCase_ ) _a , _a , _a : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ ) _a : int = model[0].eval() recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned ) hf_wavavec.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) __lowerCAmelCase = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
89
"""Lazy import structure for the Swin model (transformers-style module init)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Configuration is always importable; modeling modules are added below only
# when their backend (torch / tf) is available.
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
1
"""Lazy import structure for the (deprecated) M-CTC-T model."""
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Configuration / feature extraction / processing are always importable;
# modeling is added below only when torch is available.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so torch loads only on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
"""Tests for the Barthez (moussaKam/mbarthez) tokenizer, slow and fast."""
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # Save the pretrained tokenizer in both legacy and new formats so the
        # mixin can reload either flavour from tmpdirname.
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
89
1
"""Output dataclass and conditional pipeline export for Semantic Stable Diffusion."""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output of a Semantic Stable Diffusion pipeline call.

    Args:
        images:
            Denoised images, either as a list of PIL images or a numpy array.
        nsfw_content_detected:
            Per-image flags from the safety checker, or None if it was not run.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
89
"""Tests for TRANSFORMERS_OFFLINE behaviour, run through subprocesses because the
env var must be set before `transformers` is imported."""
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '

        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '

        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '

        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '

        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '

        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '

        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_mode_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
1
"""Conditional DETR model configuration."""
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """
    Configuration for a Conditional DETR model.  Instantiating with the
    defaults yields a configuration similar to
    microsoft/conditional-detr-resnet-50.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # A timm backbone and an HF backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, nesting the backbone config if present."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
89
"""Project Euler problem 12: first triangle number with more than 500 divisors."""


def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ... for n = 1 .. 999999."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Return the number of positive divisors of `n` via prime factorization.

    Uses the multiplicative formula d(p1^a1 * ... * pk^ak) = (a1+1)...(ak+1).
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # Remaining n is a prime factor with multiplicity 1.
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number with more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
89
1
"""Fast and slow tests for the Kandinsky 2.2 text-to-image decoder pipeline."""
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyVaaPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaPipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    # NOTE(review): the last mangled class attribute held `False`; name assumed
    # from the upstream test file — confirm.
    test_xformers_attention_forwardGenerator_pass = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """A tiny deterministic UNet for fast CPU tests."""
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs; MPS needs a CPU-side generator."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyVaaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy"
        )

        # The original obfuscation rendered `torch.float16` as the nonexistent
        # `torch.floataa`; fixed (the reference image above is an fp16 run).
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "red cat, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
89
"""Dataset <-> SQL input/output streams for the `datasets` library."""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream

if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    """Read a Dataset from a SQL query/table via the packaged `Sql` builder."""

    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        """Download/prepare the builder and return the materialized train split."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    """Write a Dataset to a SQL table, batched and optionally multiprocessed."""

    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        """Flush the dataset to SQL; returns the number of rows written."""
        # These keys are supplied explicitly, not forwarded to pandas' to_sql.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one batch; `args` is (offset, index, to_sql_kwargs)."""
        offset, index, to_sql_kwargs = args
        # Only the first batch may create/replace the table; later ones append.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Write the dataset to SQL batch by batch; returns rows written."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
89
1
"""Flax/JAX DDPM noise scheduler (denoising diffusion probabilistic models).

NOTE(review): this module is machine-obfuscated. All three classes are
bound to the same name `__magic_name__` (the later bindings clobber the
earlier ones), duplicate `_UpperCAmelCase` parameter names make the method
signatures SyntaxErrors, and bodies reference pre-obfuscation names
(`sample`, `state`, `t`, `variance`, `DDPMSchedulerState`,
`FlaxDDPMSchedulerOutput`). Restore from the upstream
`diffusers/schedulers/scheduling_ddpm_flax.py` before use.
"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


@flax.struct.dataclass
class __magic_name__ :
    # Immutable scheduler state carried through JAX transformations.
    lowerCAmelCase : CommonSchedulerState

    # setable values
    lowerCAmelCase : jnp.ndarray
    lowerCAmelCase : jnp.ndarray
    lowerCAmelCase : Optional[int] = None

    @classmethod
    def __lowercase ( cls : Tuple ,_UpperCAmelCase : CommonSchedulerState ,_UpperCAmelCase : jnp.ndarray ,_UpperCAmelCase : jnp.ndarray ):
        # Factory keeping field order explicit via keywords.
        return cls(common=_UpperCAmelCase ,init_noise_sigma=_UpperCAmelCase ,timesteps=_UpperCAmelCase )


@dataclass
class __magic_name__ ( _UpperCamelCase ):
    # Output wrapper: scheduler output plus the updated state.
    lowerCAmelCase : DDPMSchedulerState


class __magic_name__ ( _UpperCamelCase , _UpperCamelCase ):
    # The scheduler itself; compatible with the Karras diffusion scheduler set.
    lowerCAmelCase : Dict = [e.name for e in FlaxKarrasDiffusionSchedulers]

    lowerCAmelCase : jnp.dtype

    @property
    def __lowercase ( self : List[str] ):
        # Scheduler carries explicit state (functional/JAX style).
        return True

    @register_to_config
    def __init__( self : List[Any] ,_UpperCAmelCase : int = 1000 ,_UpperCAmelCase : float = 0.00_01 ,_UpperCAmelCase : float = 0.02 ,_UpperCAmelCase : str = "linear" ,_UpperCAmelCase : Optional[jnp.ndarray] = None ,_UpperCAmelCase : str = "fixed_small" ,_UpperCAmelCase : bool = True ,_UpperCAmelCase : str = "epsilon" ,_UpperCAmelCase : jnp.dtype = jnp.floataa ,):
        _a : Any = dtype

    def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : Optional[CommonSchedulerState] = None ):
        # Build the initial scheduler state (create_state).
        if common is None:
            _a : Optional[int] = CommonSchedulerState.create(self )

        # standard deviation of the initial noise distribution
        _a : Union[str, Any] = jnp.array(1.0 ,dtype=self.dtype )

        # Training timesteps in descending order.
        _a : Dict = jnp.arange(0 ,self.config.num_train_timesteps ).round()[::-1]

        return DDPMSchedulerState.create(
            common=_UpperCAmelCase ,init_noise_sigma=_UpperCAmelCase ,timesteps=_UpperCAmelCase ,)

    def __lowercase ( self : str ,_UpperCAmelCase : DDPMSchedulerState ,_UpperCAmelCase : jnp.ndarray ,_UpperCAmelCase : Optional[int] = None ):
        # scale_model_input: DDPM needs no input scaling, return unchanged.
        return sample

    def __lowercase ( self : int ,_UpperCAmelCase : DDPMSchedulerState ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple = () ):
        # set_timesteps: pick an evenly strided subset of training timesteps.
        _a : Dict = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        _a : Optional[Any] = (jnp.arange(0 ,_UpperCAmelCase ) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=_UpperCAmelCase ,timesteps=_UpperCAmelCase ,)

    def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : DDPMSchedulerState ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Tuple=None ,_UpperCAmelCase : Union[str, Any]=None ):
        # _get_variance: posterior variance for timestep t per the chosen mode.
        _a : Union[str, Any] = state.common.alphas_cumprod[t]
        _a : Optional[int] = jnp.where(t > 0 ,state.common.alphas_cumprod[t - 1] ,jnp.array(1.0 ,dtype=self.dtype ) )

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        _a : Optional[Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            _a : Tuple = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            # Clamp to avoid log/ sqrt of zero downstream.
            _a : Dict = jnp.clip(_UpperCAmelCase ,a_min=1E-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            _a : int = jnp.log(jnp.clip(_UpperCAmelCase ,a_min=1E-20 ) )
        elif variance_type == "fixed_large":
            _a : Dict = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            _a : Tuple = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # Interpolate between min/max log-variance using the model output.
            _a : Optional[Any] = variance
            _a : Any = state.common.betas[t]
            _a : str = (predicted_variance + 1) / 2
            _a : Dict = frac * max_log + (1 - frac) * min_log

        return variance

    def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : DDPMSchedulerState ,_UpperCAmelCase : jnp.ndarray ,_UpperCAmelCase : int ,_UpperCAmelCase : jnp.ndarray ,_UpperCAmelCase : Optional[jax.random.KeyArray] = None ,_UpperCAmelCase : bool = True ,):
        # step: one reverse-diffusion update x_t -> x_{t-1}.
        _a : Union[str, Any] = timestep

        if key is None:
            # Deterministic default PRNG key when none supplied.
            _a : Union[str, Any] = jax.random.PRNGKey(0 )

        # Models trained with learned variance emit mean and variance stacked
        # on the channel axis; split them apart.
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            _a , _a : List[str] = jnp.split(_UpperCAmelCase ,sample.shape[1] ,axis=1 )
        else:
            _a : Any = None

        # 1. compute alphas, betas
        _a : List[Any] = state.common.alphas_cumprod[t]
        _a : int = jnp.where(t > 0 ,state.common.alphas_cumprod[t - 1] ,jnp.array(1.0 ,dtype=self.dtype ) )
        _a : Tuple = 1 - alpha_prod_t
        _a : List[Any] = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            _a : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            _a : Any = model_output
        elif self.config.prediction_type == "v_prediction":
            _a : Union[str, Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                ' for the FlaxDDPMScheduler.' )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            _a : Union[str, Any] = jnp.clip(_UpperCAmelCase ,-1 ,1 )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        _a : Dict = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        _a : Any = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        _a : List[Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            # Draw noise and scale by the posterior std-dev for timestep t.
            _a : List[Any] = jax.random.split(_UpperCAmelCase ,num=1 )
            _a : int = jax.random.normal(_UpperCAmelCase ,shape=model_output.shape ,dtype=self.dtype )
            return (self._get_variance(_UpperCAmelCase ,_UpperCAmelCase ,predicted_variance=_UpperCAmelCase ) ** 0.5) * noise

        # No noise is added at the final step (t == 0).
        _a : List[str] = jnp.where(t > 0 ,random_variance() ,jnp.zeros(model_output.shape ,dtype=self.dtype ) )

        _a : Tuple = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=_UpperCAmelCase ,state=_UpperCAmelCase )

    def __lowercase ( self : Tuple ,_UpperCAmelCase : DDPMSchedulerState ,_UpperCAmelCase : jnp.ndarray ,_UpperCAmelCase : jnp.ndarray ,_UpperCAmelCase : jnp.ndarray ,):
        # add_noise: forward-diffuse clean samples (delegates to common impl).
        return add_noise_common(state.common ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )

    def __lowercase ( self : Any ,_UpperCAmelCase : DDPMSchedulerState ,_UpperCAmelCase : jnp.ndarray ,_UpperCAmelCase : jnp.ndarray ,_UpperCAmelCase : jnp.ndarray ,):
        # get_velocity: v-prediction target (delegates to common impl).
        return get_velocity_common(state.common ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )

    def __len__( self : List[Any] ):
        return self.config.num_train_timesteps
89
'''simple docstring''' from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> np.ndarray: _a : Union[str, Any] = cva.getAffineTransform(lowerCAmelCase_ , lowerCAmelCase_ ) return cva.warpAffine(lowerCAmelCase_ , lowerCAmelCase_ , (rows, cols) ) if __name__ == "__main__": # read original image __lowerCAmelCase = cva.imread( str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''') ) # turn image in gray scale value __lowerCAmelCase = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape __lowerCAmelCase , __lowerCAmelCase = gray_img.shape # set different points to rotate image __lowerCAmelCase = np.array([[50, 50], [200, 50], [50, 200]], np.floataa) __lowerCAmelCase = np.array([[10, 100], [200, 50], [100, 250]], np.floataa) __lowerCAmelCase = np.array([[50, 50], [150, 50], [120, 200]], np.floataa) __lowerCAmelCase = np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list __lowerCAmelCase = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations __lowerCAmelCase = plt.figure(1) __lowerCAmelCase = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3'''] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''') plt.title(titles[i]) plt.axis('''off''') plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
89
1
'''simple docstring''' def __lowerCamelCase ( lowerCAmelCase_ = 100 ) -> int: _a : List[str] = (n * (n + 1) // 2) ** 2 _a : str = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(f"""{solution() = }""")
89
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase = { '''configuration_bigbird_pegasus''': [ '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BigBirdPegasusConfig''', '''BigBirdPegasusOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BigBirdPegasusForCausalLM''', '''BigBirdPegasusForConditionalGeneration''', '''BigBirdPegasusForQuestionAnswering''', '''BigBirdPegasusForSequenceClassification''', '''BigBirdPegasusModel''', '''BigBirdPegasusPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
89
1
'''simple docstring''' def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[Any]: _a : Optional[Any] = [] _a : List[str] = set({'(', '[', '{'} ) _a : List[str] = set({')', ']', '}'} ) _a : Optional[int] = {'{': '}', '[': ']', '(': ')'} for i in range(len(lowerCAmelCase_ ) ): if s[i] in open_brackets: stack.append(s[i] ) elif s[i] in closed_brackets and ( len(lowerCAmelCase_ ) == 0 or (len(lowerCAmelCase_ ) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(lowerCAmelCase_ ) == 0 def __lowerCamelCase ( ) -> str: _a : Any = input('Enter sequence of brackets: ' ) if is_balanced(lowerCAmelCase_ ): print(lowerCAmelCase_ , 'is balanced' ) else: print(lowerCAmelCase_ , 'is not balanced' ) if __name__ == "__main__": main()
89
'''simple docstring''' import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1024 , lowerCAmelCase_=1024 , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> List[Any]: _a : str = AutoTokenizer.from_pretrained(lowerCAmelCase_ ) _a : List[Any] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='train' , **lowerCAmelCase_ ) _a : List[str] = tok.pad_token_id def get_lens(lowerCAmelCase_ ): _a : Dict = tqdm( DataLoader(lowerCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=lowerCAmelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) _a : Union[str, Any] = [] for batch in dl: _a : Optional[Any] = batch['input_ids'].ne(lowerCAmelCase_ ).sum(1 ).tolist() _a : Optional[Any] = batch['labels'].ne(lowerCAmelCase_ ).sum(1 ).tolist() if consider_target: for src, tgt in zip(lowerCAmelCase_ , lowerCAmelCase_ ): max_lens.append(max(lowerCAmelCase_ , lowerCAmelCase_ ) ) else: max_lens.extend(lowerCAmelCase_ ) return max_lens _a : str = get_lens(lowerCAmelCase_ ) _a : Optional[int] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='val' , **lowerCAmelCase_ ) _a : Dict = get_lens(lowerCAmelCase_ ) pickle_save(lowerCAmelCase_ , train_ds.len_file ) pickle_save(lowerCAmelCase_ , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
89
1
"""Slow GPU integration tests for the seq2seq finetuning/distillation example
scripts: they rewrite the example bash scripts into argv lists, run training
for real, and assert on the resulting BLEU metrics and checkpoints.

NOTE(review): obfuscated module. Duplicate `_UpperCAmelCase` parameters make
signatures SyntaxErrors, both test classes share the colliding name
`__magic_name__`, and bodies reference pre-obfuscation names
(`bash_script`, `args`, `model`, `metrics`, `MARIAN_MODEL`, ...). Confirm
against the upstream `test_seq2seq_examples_multi_gpu` sources.
"""
import argparse
import os
import sys
from unittest.mock import patch

import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json


__lowerCAmelCase = '''sshleifer/mar_enro_6_3_student'''


class __magic_name__ ( _UpperCamelCase ):
    def __lowercase ( self : int ):
        # Download + cache the small WMT en-ro subset used by the tests.
        super().setUp()
        _a : Any = cached_path(
            'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' ,extract_compressed_file=_UpperCAmelCase ,)
        _a : List[str] = F"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""

    @slow
    @require_torch_gpu
    def __lowercase ( self : Dict ):
        # Smoke-test that the student checkpoint loads at all.
        MarianMTModel.from_pretrained(_UpperCAmelCase )

    @slow
    @require_torch_gpu
    def __lowercase ( self : Any ):
        # Substitutions applied to the example bash script before running it.
        _a : Any = {
            '$MAX_LEN': 64,
            '$BS': 64,
            '$GAS': 1,
            '$ENRO_DIR': self.data_dir,
            'facebook/mbart-large-cc25': MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            '--learning_rate=3e-5': '--learning_rate 3e-4',
            '--num_train_epochs 6': '--num_train_epochs 1',
        }

        # Clean up bash script
        _a : Union[str, Any] = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
        _a : Union[str, Any] = bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
        for k, v in env_vars_to_replace.items():
            _a : int = bash_script.replace(_UpperCAmelCase ,str(_UpperCAmelCase ) )
        _a : List[str] = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        _a : Any = F""" --output_dir {output_dir} --tokenizer_name Helsinki-NLP/opus-mt-en-ro --sortish_sampler --do_predict --gpus 1 --freeze_encoder --n_train 40000 --n_val 500 --n_test 500 --fp16_opt_level O1 --num_sanity_val_steps 0 --eval_beams 2 """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        _a : List[str] = ['finetune.py'] + bash_script.split() + args
        with patch.object(_UpperCAmelCase ,'argv' ,_UpperCAmelCase ):
            _a : str = argparse.ArgumentParser()
            _a : Any = pl.Trainer.add_argparse_args(_UpperCAmelCase )
            _a : List[Any] = SummarizationModule.add_model_specific_args(_UpperCAmelCase ,os.getcwd() )
            _a : Optional[int] = parser.parse_args()
            _a : int = main(_UpperCAmelCase )

            # Check metrics
            _a : Any = load_json(model.metrics_save_path )
            _a : int = metrics['val'][0]
            _a : Tuple = metrics['val'][-1]
            self.assertEqual(len(metrics['val'] ) ,(args.max_epochs / args.val_check_interval) )
            assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] ,_UpperCAmelCase )
            self.assertGreater(last_step_stats['val_avg_gen_time'] ,0.01 )
            # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
            self.assertLessEqual(last_step_stats['val_avg_gen_time'] ,1.0 )
            # test learning requirements:
            # 1. BLEU improves over the course of training by more than 2 pts
            self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] ,2 )
            # 2. BLEU finishes above 17
            self.assertGreater(last_step_stats['val_avg_bleu'] ,17 )
            # 3. test BLEU and val BLEU within ~1.1 pt.
            self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) ,1.1 )

            # check lightning ckpt can be loaded and has a reasonable statedict
            _a : Optional[Any] = os.listdir(_UpperCAmelCase )
            _a : Optional[Any] = [x for x in contents if x.endswith('.ckpt' )][0]
            _a : List[str] = os.path.join(args.output_dir ,_UpperCAmelCase )
            _a : Optional[int] = torch.load(_UpperCAmelCase ,map_location='cpu' )
            _a : List[str] = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
            assert expected_key in ckpt["state_dict"]
            assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa

            # TODO: turn on args.do_predict when PL bug fixed.
            if args.do_predict:
                _a : Optional[Any] = {os.path.basename(_UpperCAmelCase ) for p in contents}
                assert "test_generations.txt" in contents
                assert "test_results.txt" in contents
                # assert len(metrics["val"]) == desired_n_evals
                assert len(metrics['test'] ) == 1


class __magic_name__ ( _UpperCamelCase ):
    @timeout_decorator.timeout(600 )
    @slow
    @require_torch_gpu
    def __lowercase ( self : Any ):
        # Same flow as above but for the no-teacher distillation script.
        _a : Any = F"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
        _a : Union[str, Any] = {
            '--fp16_opt_level=O1': '',
            '$MAX_LEN': 128,
            '$BS': 16,
            '$GAS': 1,
            '$ENRO_DIR': data_dir,
            '$m': 'sshleifer/student_marian_en_ro_6_1',
            'val_check_interval=0.25': 'val_check_interval=1.0',
        }

        # Clean up bash script
        _a : Any = (
            (self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
        )
        _a : Optional[int] = bash_script.replace('\\\n' ,'' ).strip().replace('"$@"' ,'' )
        _a : Tuple = bash_script.replace('--fp16 ' ,' ' )

        for k, v in env_vars_to_replace.items():
            _a : Tuple = bash_script.replace(_UpperCAmelCase ,str(_UpperCAmelCase ) )
        _a : Dict = self.get_auto_remove_tmp_dir()
        _a : int = bash_script.replace('--fp16' ,'' )
        _a : Any = 6
        _a : int = (
            ['distillation.py']
            + bash_script.split()
            + [
                F"""--output_dir={output_dir}""",
                '--gpus=1',
                '--learning_rate=1e-3',
                F"""--num_train_epochs={epochs}""",
                '--warmup_steps=10',
                '--val_check_interval=1.0',
                '--do_predict',
            ]
        )
        with patch.object(_UpperCAmelCase ,'argv' ,_UpperCAmelCase ):
            _a : List[str] = argparse.ArgumentParser()
            _a : Union[str, Any] = pl.Trainer.add_argparse_args(_UpperCAmelCase )
            _a : Any = SummarizationDistiller.add_model_specific_args(_UpperCAmelCase ,os.getcwd() )
            _a : Dict = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            _a : Dict = distill_main(_UpperCAmelCase )

            # Check metrics
            _a : Optional[Any] = load_json(model.metrics_save_path )
            _a : Optional[Any] = metrics['val'][0]
            _a : Union[str, Any] = metrics['val'][-1]
            assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
            assert last_step_stats["val_avg_gen_time"] >= 0.01
            assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
            assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
            assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] ,_UpperCAmelCase )

            # check lightning ckpt can be loaded and has a reasonable statedict
            _a : Tuple = os.listdir(_UpperCAmelCase )
            _a : Dict = [x for x in contents if x.endswith('.ckpt' )][0]
            _a : Union[str, Any] = os.path.join(args.output_dir ,_UpperCAmelCase )
            _a : str = torch.load(_UpperCAmelCase ,map_location='cpu' )
            _a : Union[str, Any] = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
            assert expected_key in ckpt["state_dict"]
            assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa

            # TODO: turn on args.do_predict when PL bug fixed.
            if args.do_predict:
                _a : int = {os.path.basename(_UpperCAmelCase ) for p in contents}
                assert "test_generations.txt" in contents
                assert "test_results.txt" in contents
                # assert len(metrics["val"]) == desired_n_evals
                assert len(metrics['test'] ) == 1
89
"""Singly linked list with tests and an interactive demo.

NOTE(review): the obfuscated original bound both classes to the colliding
name `__magic_name__`, used duplicate `_UpperCAmelCase` parameter names
(a SyntaxError), and referenced `Node`, `LinkedList` and `main` which no
longer existed. Reconstructed here with the names the call sites use.
"""
from typing import Any


class Node:
    """A single list node holding `data` and a pointer to the next node."""

    def __init__(self, data: Any):
        self.data = data
        self.next = None  # set when the node is linked into a list

    def __repr__(self):
        return F"""Node({self.data})"""


class LinkedList:
    """Singly linked list supporting indexing, insertion, deletion, reversal."""

    def __init__(self):
        self.head = None  # first node, or None for an empty list

    def __iter__(self):
        # Yield the *data* stored in each node, front to back.
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        # O(n): walk the whole list.
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        """Return the data stored at `index` (0-based)."""
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any):
        """Replace the data stored at `index` (0-based)."""
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any):
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any):
        """Insert `data` so that it ends up at position `index`."""
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        """Remove and return the data at position `index`."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        """Reverse the list in place."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    """Exercise the integer-only behavior of LinkedList."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Exercise LinkedList with heterogeneous data (nodes, None, floats...)."""
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55_555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list)
        == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list)
        == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list)
        == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list)
        == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    """Run the doctests, then an interactive linked-list demo."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f"""Element at Position 1: {linked_list[1]}""")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f"""length of linked_list is : {len(linked_list)}""")


if __name__ == "__main__":
    main()
89
1
'''simple docstring''' def __lowerCamelCase ( lowerCAmelCase_ = 10**12 ) -> int: _a : Tuple = 1 _a : List[Any] = 0 _a : List[str] = 1 _a : Dict = 1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(f"""{solution() = }""")
89
"""Doctest runner over the transformers source tree: walks directories,
filters files by identifier, and runs each file's doctests (or its module's
DocTestSuite when `only_modules` is set).

NOTE(review): obfuscated module. The first method's duplicate
`_UpperCAmelCase` parameters are a SyntaxError, and bodies reference
pre-obfuscation names (`files`, `identifier`, `ignore_files`, `result`,
`directory`). The whole class is skipped via `unittest.skip` anyway.
"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


__lowerCAmelCase = logging.getLogger()


@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __magic_name__ ( unittest.TestCase ):
    def __lowercase ( self : str ,_UpperCAmelCase : Path ,_UpperCAmelCase : Union[str, None] = None ,_UpperCAmelCase : Union[List[str], None] = None ,_UpperCAmelCase : Union[str, List[str], None] = None ,_UpperCAmelCase : bool = True ,):
        # Collect candidate files in `directory`, filter, then doctest each.
        _a : Dict = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )]

        # Keep only files whose name contains `identifier`.
        if identifier is not None:
            _a : str = [file for file in files if identifier in file]

        # Drop files matching any negative identifier.
        if n_identifier is not None:
            if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
                for n_ in n_identifier:
                    _a : int = [file for file in files if n_ not in file]
            else:
                _a : Optional[Any] = [file for file in files if n_identifier not in file]

        _a : Dict = ignore_files or []
        ignore_files.append('__init__.py' )
        _a : List[str] = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing' ,_UpperCAmelCase )

            if only_modules:
                # Import the module and run its doctest suite.
                _a : Any = file.split('.' )[0]
                try:
                    _a : Optional[int] = getattr(_UpperCAmelCase ,_UpperCAmelCase )
                    _a : Dict = doctest.DocTestSuite(_UpperCAmelCase )
                    _a : Optional[int] = unittest.TextTestRunner().run(_UpperCAmelCase )
                    self.assertIs(len(result.failures ) ,0 )
                except AttributeError:
                    logger.info(F"""{module_identifier} is not a module.""" )
            else:
                # Run the file's doctests directly.
                _a : str = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed ,0 )

    def __lowercase ( self : Union[str, Any] ):
        # Modeling files, minus the two CTRL exceptions.
        _a : Optional[Any] = Path('src/transformers' )
        _a : Optional[Any] = 'modeling'
        _a : Union[str, Any] = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase ,ignore_files=_UpperCAmelCase )

    def __lowercase ( self : int ):
        # Tokenization files.
        _a : str = Path('src/transformers' )
        _a : List[str] = 'tokenization'
        self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )

    def __lowercase ( self : int ):
        # Configuration files.
        _a : Any = Path('src/transformers' )
        _a : str = 'configuration'
        self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )

    def __lowercase ( self : Dict ):
        # Everything *except* configuration/modeling/tokenization files.
        _a : Tuple = Path('src/transformers' )
        _a : Optional[int] = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(_UpperCAmelCase ,n_identifier=_UpperCAmelCase )

    def __lowercase ( self : Optional[Any] ):
        # Documentation sources (run as doctest files, not modules).
        _a : Union[str, Any] = Path('docs/source' )
        _a : List[str] = ['favicon.ico']
        self.analyze_directory(_UpperCAmelCase ,ignore_files=_UpperCAmelCase ,only_modules=_UpperCAmelCase )
89
1
"""Convert an XLNet TensorFlow checkpoint into a PyTorch model directory."""
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


# Number of classification labels for each supported GLUE task.
GLUE_TASKS_NUM_LABELS = {
    'cola': 2,
    'mnli': 3,
    'mrpc': 2,
    'sst-2': 2,
    'sts-b': 1,
    'qqp': 2,
    'qnli': 2,
    'rte': 2,
    'wnli': 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    """Build the right XLNet head for `finetuning_task`, load the TF weights, and save.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: path to the XLNet config json file.
        pytorch_dump_folder_path: output folder for `WEIGHTS_NAME`/`CONFIG_NAME`.
        finetuning_task: optional task name — a GLUE task selects a
            sequence-classification head, a task containing "squad" selects a
            QA head, anything else falls back to the LM head.
    """
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ''
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"""Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}""" )
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"""Save configuration file to {os.path.abspath(pytorch_config_dump_path)}""" )
    with open(pytorch_config_dump_path, 'w', encoding='utf-8' ) as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--xlnet_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained XLNet model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=str,
        required=True,
        help='Path to the folder to store the PyTorch model or dataset/vocab.',
    )
    parser.add_argument(
        '--finetuning_task',
        default=None,
        type=str,
        help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
89
"""AutoImageProcessor: resolve and instantiate the right image processor for a model."""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)


logger = logging.get_logger(__name__)

# Maps a `model_type` string to the class name of its image processor.
# (duplicate 'mobilevit' entry removed — OrderedDict would silently drop it anyway)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ('align', 'EfficientNetImageProcessor'),
        ('beit', 'BeitImageProcessor'),
        ('bit', 'BitImageProcessor'),
        ('blip', 'BlipImageProcessor'),
        ('blip-2', 'BlipImageProcessor'),
        ('bridgetower', 'BridgeTowerImageProcessor'),
        ('chinese_clip', 'ChineseCLIPImageProcessor'),
        ('clip', 'CLIPImageProcessor'),
        ('clipseg', 'ViTImageProcessor'),
        ('conditional_detr', 'ConditionalDetrImageProcessor'),
        ('convnext', 'ConvNextImageProcessor'),
        ('convnextv2', 'ConvNextImageProcessor'),
        ('cvt', 'ConvNextImageProcessor'),
        ('data2vec-vision', 'BeitImageProcessor'),
        ('deformable_detr', 'DeformableDetrImageProcessor'),
        ('deit', 'DeiTImageProcessor'),
        ('deta', 'DetaImageProcessor'),
        ('detr', 'DetrImageProcessor'),
        ('dinat', 'ViTImageProcessor'),
        ('donut-swin', 'DonutImageProcessor'),
        ('dpt', 'DPTImageProcessor'),
        ('efficientformer', 'EfficientFormerImageProcessor'),
        ('efficientnet', 'EfficientNetImageProcessor'),
        ('flava', 'FlavaImageProcessor'),
        ('focalnet', 'BitImageProcessor'),
        ('git', 'CLIPImageProcessor'),
        ('glpn', 'GLPNImageProcessor'),
        ('groupvit', 'CLIPImageProcessor'),
        ('imagegpt', 'ImageGPTImageProcessor'),
        ('instructblip', 'BlipImageProcessor'),
        ('layoutlmv2', 'LayoutLMv2ImageProcessor'),
        ('layoutlmv3', 'LayoutLMv3ImageProcessor'),
        ('levit', 'LevitImageProcessor'),
        ('mask2former', 'Mask2FormerImageProcessor'),
        ('maskformer', 'MaskFormerImageProcessor'),
        ('mgp-str', 'ViTImageProcessor'),
        ('mobilenet_v1', 'MobileNetV1ImageProcessor'),
        ('mobilenet_v2', 'MobileNetV2ImageProcessor'),
        ('mobilevit', 'MobileViTImageProcessor'),
        ('mobilevitv2', 'MobileViTImageProcessor'),
        ('nat', 'ViTImageProcessor'),
        ('oneformer', 'OneFormerImageProcessor'),
        ('owlvit', 'OwlViTImageProcessor'),
        ('perceiver', 'PerceiverImageProcessor'),
        ('pix2struct', 'Pix2StructImageProcessor'),
        ('poolformer', 'PoolFormerImageProcessor'),
        ('regnet', 'ConvNextImageProcessor'),
        ('resnet', 'ConvNextImageProcessor'),
        ('sam', 'SamImageProcessor'),
        ('segformer', 'SegformerImageProcessor'),
        ('swiftformer', 'ViTImageProcessor'),
        ('swin', 'ViTImageProcessor'),
        ('swin2sr', 'Swin2SRImageProcessor'),
        ('swinv2', 'ViTImageProcessor'),
        ('table-transformer', 'DetrImageProcessor'),
        ('timesformer', 'VideoMAEImageProcessor'),
        ('tvlt', 'TvltImageProcessor'),
        ('upernet', 'SegformerImageProcessor'),
        ('van', 'ConvNextImageProcessor'),
        ('videomae', 'VideoMAEImageProcessor'),
        ('vilt', 'ViltImageProcessor'),
        ('vit', 'ViTImageProcessor'),
        ('vit_hybrid', 'ViTHybridImageProcessor'),
        ('vit_mae', 'ViTImageProcessor'),
        ('vit_msn', 'ViTImageProcessor'),
        ('xclip', 'CLIPImageProcessor'),
        ('yolos', 'YolosImageProcessor'),
    ]
)

IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)


def image_processor_class_from_name(class_name: str):
    """Resolve an image processor class from its class name, or return None.

    Searches the static mapping first, then any runtime-registered extra
    content, and finally the main `transformers` namespace (which exposes a
    dummy class when an optional dependency is missing).
    """
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f""".{module_name}""" , 'transformers.models' )
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__' , None) == class_name:
            return extractor

    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers' )
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Fetch and parse the image processor config dict for a repo or local path.

    Returns an empty dict when no image processor config file can be located
    (the caller then falls back to the model config).
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.'
        )
        return {}

    with open(resolved_config_file, encoding='utf-8' ) as reader:
        return json.load(reader)


class AutoImageProcessor:
    """Factory class: use `from_pretrained` to get a concrete image processor."""

    def __init__(self):
        raise EnvironmentError(
            'AutoImageProcessor is designed to be instantiated '
            'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.'
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the image processor class matching `pretrained_model_name_or_path`.

        Resolution order: explicit `image_processor_type` in the image
        processor config → legacy `feature_extractor_type` (renamed) →
        the model config's `image_processor_type` → the auto mapping keyed
        by config class. Remote code is only loaded when `trust_remote_code`
        allows it.
        """
        config = kwargs.pop('config' , None)
        trust_remote_code = kwargs.pop('trust_remote_code' , None)
        kwargs['_from_auto'] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get('image_processor_type' , None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('auto_map' , {}):
            image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('feature_extractor_type' , None)
            if feature_extractor_class is not None:
                logger.warning(
                    'Could not find image processor class in the image processor config or the model config. Loading'
                    ' based on pattern matching with the model\'s feature extractor configuration.'
                )
                image_processor_class = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
            if "AutoFeatureExtractor" in config_dict.get('auto_map' , {}):
                feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
                image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
                logger.warning(
                    'Could not find image processor auto map in the image processor config or the model config.'
                    ' Loading based on pattern matching with the model\'s feature extractor configuration.'
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, 'image_processor_type' , None)
            if hasattr(config, 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['AutoImageProcessor']

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop('code_revision' , None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
            f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}"""
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new (config class, image processor class) pair for auto-resolution."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
89
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase = { '''configuration_nllb_moe''': [ '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NllbMoeConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NllbMoeForConditionalGeneration''', '''NllbMoeModel''', '''NllbMoePreTrainedModel''', '''NllbMoeTop2Router''', '''NllbMoeSparseMLP''', ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
89
"""`Image` feature type: encode/decode images to/from an Arrow {bytes, path} struct."""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType


# Cache of formats Pillow can both open and save; populated lazily.
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype('|b1' ),
    np.dtype('|u1' ),
    np.dtype('<u2' ),
    np.dtype('>u2' ),
    np.dtype('<i2' ),
    np.dtype('>i2' ),
    np.dtype('<u4' ),
    np.dtype('>u4' ),
    np.dtype('<i4' ),
    np.dtype('>i4' ),
    np.dtype('<f4' ),
    np.dtype('>f4' ),
    np.dtype('<f8' ),
    np.dtype('>f8' ),
]


@dataclass
class Image:
    """Feature storing images as an Arrow struct of {bytes: binary, path: string}.

    With `decode=True` (default) examples decode to `PIL.Image.Image`;
    otherwise the raw {bytes, path} dict is returned.
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Image' , init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode one example into the storage dict {path, bytes}."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.' )

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('path' )}
        elif value.get('bytes' ) is not None or value.get('path' ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('bytes' ), "path": value.get('path' )}
        else:
            raise ValueError(
                f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."""
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode a storage dict back into a `PIL.Image.Image`.

        `token_per_repo_id` maps Hub repo ids to auth tokens for streaming
        from private datasets.
        """
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.' )

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split('::' )[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)['repo_id']
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, 'rb' , use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten to the underlying storage features when decoding is off."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value('binary' ),
                "path": Value('string' ),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast string/binary/struct/list Arrow storage into the {bytes, path} struct."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('bytes' ) >= 0:
                bytes_array = storage.field('bytes' )
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index('path' ) >= 0:
                path_array = storage.field('path' )
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # nested lists are treated as image arrays and re-encoded to bytes
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))['bytes'] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Inline file contents into `bytes`, keeping only the basename in `path`."""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, 'rb' ) as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('path' ).to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)


def list_image_compression_formats() -> List[str]:
    """Return (and cache) the formats Pillow can both open and save."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Serialize a PIL image; keep its format if savable, else PNG/TIFF by mode."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encode a PIL image; prefer referencing its source file over re-serializing."""
    if hasattr(image, 'filename' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    """Encode a numpy array as image bytes, downcasting to a Pillow-safe dtype."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."""
            )
        if dtype is not dest_dtype:
            warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:
        # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"""
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(objs) -> List[dict]:
    """Encode a homogeneous list of image-like objects, preserving None entries."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
89
1
"""Convert MobileViTV2 (MLCVNets) checkpoints into HF transformers format."""
import argparse
import collections
import json
from pathlib import Path

import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(orig_cfg_file):
    """Load the original YAML config into a flat (dot-separated) argparse.Namespace."""
    print('Loading config file...' )

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, 'r' ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(orig_cfg_file, str(exc)))
    return config


def get_mobilevitv2_config(task_name, orig_cfg_file):
    """Build a `MobileViTV2Config` for `task_name`, validated against the original YAML."""
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith('imagenet1k_' ):
        config.num_labels = 1000
        if int(task_name.strip().split('_' )[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-1k-id2label.json'
    elif task_name.startswith('imagenet21k_to_1k_' ):
        config.num_labels = 21000
        if int(task_name.strip().split('_' )[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-22k-id2label.json'
    elif task_name.startswith('ade20k_' ):
        config.num_labels = 151
        config.image_size = 512
        filename = 'ade20k-id2label.json'
        is_segmentation_model = True
    elif task_name.startswith('voc_' ):
        config.num_labels = 21
        config.image_size = 512
        filename = 'pascal-voc-id2label.json'
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, 'model.classification.name' , -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, 'model.classification.mitv2.width_multiplier' , 1.0)
    assert (
        getattr(orig_config, 'model.classification.mitv2.attn_norm_layer' , -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, 'model.classification.activation.name' , 'swish' )
    # config.image_size == getattr(orig_config,  'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, 'model.segmentation.output_stride' , 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_out_channels' , 512)
            config.aspp_dropout_prob = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_dropout' , 0.1)

    # id2label
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]`."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict, base_model=False):
    """Map original MLCVNets state-dict keys to their HF transformers names."""
    if base_model:
        model_prefix = ""
    else:
        model_prefix = 'mobilevitv2.'

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace('.block.' , '.' )
        if ".conv." in k:
            k_new = k_new.replace('.conv.' , '.convolution.' )
        if ".norm." in k:
            k_new = k_new.replace('.norm.' , '.normalization.' )

        if "conv_1." in k:
            k_new = k_new.replace('conv_1.' , f"""{model_prefix}conv_stem.""" )
        for i in [1, 2]:
            if f"""layer_{i}.""" in k:
                k_new = k_new.replace(f"""layer_{i}.""" , f"""{model_prefix}encoder.layer.{i-1}.layer.""" )
        if ".exp_1x1." in k:
            k_new = k_new.replace('.exp_1x1.' , '.expand_1x1.' )
        if ".red_1x1." in k:
            k_new = k_new.replace('.red_1x1.' , '.reduce_1x1.' )

        for i in [3, 4, 5]:
            if f"""layer_{i}.0.""" in k:
                k_new = k_new.replace(f"""layer_{i}.0.""" , f"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" )
            if f"""layer_{i}.1.local_rep.0.""" in k:
                k_new = k_new.replace(f"""layer_{i}.1.local_rep.0.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" )
            if f"""layer_{i}.1.local_rep.1.""" in k:
                k_new = k_new.replace(f"""layer_{i}.1.local_rep.1.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" )

        for i in [3, 4, 5]:
            # number of transformer blocks per stage in the original network
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"""layer_{i}.1.global_rep.{j}.""" in k:
                    k_new = k_new.replace(
                        f"""layer_{i}.1.global_rep.{j}.""" , f"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."""
                    )
                if f"""layer_{i}.1.global_rep.{j+1}.""" in k:
                    k_new = k_new.replace(
                        f"""layer_{i}.1.global_rep.{j+1}.""" , f"""{model_prefix}encoder.layer.{i-1}.layernorm."""
                    )

            if f"""layer_{i}.1.conv_proj.""" in k:
                k_new = k_new.replace(f"""layer_{i}.1.conv_proj.""" , f"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" )

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('pre_norm_attn.1.' , 'attention.' )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' )

        if "classifier.1." in k:
            k_new = k_new.replace('classifier.1.' , 'classifier.' )

        if "seg_head." in k:
            k_new = k_new.replace('seg_head.' , 'segmentation_head.' )
        if ".aspp_layer." in k:
            k_new = k_new.replace('.aspp_layer.' , '.' )
        if ".aspp_pool." in k:
            k_new = k_new.replace('.aspp_pool.' , '.' )

        rename_keys.append((k, k_new))
    return rename_keys


def remove_unused_keys(state_dict):
    """Drop the auxiliary segmentation head, which is not ported to HF."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.' ):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    """Download the standard COCO cats test image."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Convert an original checkpoint, sanity-check its outputs, and save HF artifacts."""
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location='cpu' )

    # load huggingface model
    if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors='pt' )
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith('imagenet' ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print('Predicted class:' , model.config.id2label[predicted_class_idx])
        if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {task_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--task',
        default='imagenet1k_256',
        type=str,
        help=(
            'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
            '''
                Classification (ImageNet-1k)
                    - MobileViTV2 (256x256) : imagenet1k_256
                    - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                    - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
                      imagenet21k_to_1k_256
                    - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
                      ImageNet-1k 384x384) : imagenet21k_to_1k_384
                Segmentation
                    - ADE20K Dataset : ade20k_deeplabv3
                    - Pascal VOC 2012 Dataset: voc_deeplabv3
            '''
        ),
        choices=[
            'imagenet1k_256',
            'imagenet1k_384',
            'imagenet21k_to_1k_256',
            'imagenet21k_to_1k_384',
            'ade20k_deeplabv3',
            'voc_deeplabv3',
        ],
    )
    parser.add_argument(
        '--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
    )
    parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
    )
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
89
"""Quine-McCluskey minimisation of boolean functions.

``main`` reads the number of variables and the minterm list, derives the
prime implicants and then selects the essential prime implicants.
"""
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two equal-length binary strings.

    Returns the merged string with ``_`` at the single differing position,
    or ``False`` when the strings differ in more than one position.

    >>> compare_string("0010", "0110")
    '0_10'
    >>> compare_string("0110", "1001")
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly combine implicants until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        # Terms never marked keep their "$" and are prime implicants.
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm to a fixed-width (``no_of_variable``) binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when the two strings differ in exactly *count* positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick the essential prime implicants from the coverage *chart*.

    First takes every implicant that is the sole cover of some minterm
    column, then greedily adds the implicant covering the most remaining
    columns until every column is covered.
    """
    temp = []
    select = [0] * len(chart)
    # A column covered by exactly one implicant makes that implicant essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Take the essential implicants and zero out the columns they cover.
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy set cover over whatever columns remain.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: ``chart[i][j] == 1`` iff implicant *i* covers minterm *j*."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    # Bug fix: minterms must be integers — converting them with ``float``
    # made ``decimal_to_binary`` emit fragments like "1.0" per bit.
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
89
1
"""Exchange sort: repeatedly swap out-of-order pairs (an O(n^2) comparison sort)."""


def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort *numbers* in place in ascending order and return the list.

    >>> exchange_sort([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    >>> exchange_sort([])
    []
    """
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                # Swap so the smallest remaining value settles into slot i.
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
89
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lazy-import init for the CPM-Ant model family."""
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Map of submodule name -> public names; consumed by ``_LazyModule`` so the
# heavy submodules are only imported on first attribute access.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The modeling code requires torch; only advertise it when torch is present.
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so ``import transformers`` stays cheap.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __lowerCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __lowerCAmelCase = 256_047 __lowerCAmelCase = 256_145 @require_sentencepiece @require_tokenizers class __magic_name__ ( _UpperCamelCase , unittest.TestCase ): lowerCAmelCase : Tuple = NllbTokenizer lowerCAmelCase : Tuple = NllbTokenizerFast lowerCAmelCase : List[str] = True lowerCAmelCase : Union[str, Any] = True lowerCAmelCase : List[str] = {} def __lowercase ( self : List[str] ): super().setUp() # We have a SentencePiece fixture for testing _a : int = NllbTokenizer(_UpperCAmelCase ,keep_accents=_UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def __lowercase ( self : Any ): _a : Optional[Any] = NllbTokenizer(_UpperCAmelCase ,keep_accents=_UpperCAmelCase ) _a : List[str] = tokenizer.tokenize('This is a test' ) self.assertListEqual(_UpperCAmelCase ,['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,) _a : Optional[int] = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( _UpperCAmelCase ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] ,) _a : Tuple = tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] ,) _a : Dict = tokenizer.convert_ids_to_tokens(_UpperCAmelCase ) self.assertListEqual( _UpperCAmelCase ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] ,) def __lowercase ( self : Optional[Any] ): _a : Tuple = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _a : List[str] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase ) _a : Tuple = self.tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase ) _a : Tuple = tempfile.mkdtemp() _a : str = tokenizer_r.save_pretrained(_UpperCAmelCase ) _a : str = tokenizer_p.save_pretrained(_UpperCAmelCase ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) _a : int = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(_UpperCAmelCase ,_UpperCAmelCase ) # Checks everything loads correctly in the same way _a : Union[str, Any] = tokenizer_r.from_pretrained(_UpperCAmelCase ) _a : 
Optional[Any] = tokenizer_p.from_pretrained(_UpperCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCAmelCase ,_UpperCAmelCase ) ) shutil.rmtree(_UpperCAmelCase ) # Save tokenizer rust, legacy_format=True _a : Any = tempfile.mkdtemp() _a : Tuple = tokenizer_r.save_pretrained(_UpperCAmelCase ,legacy_format=_UpperCAmelCase ) _a : Optional[int] = tokenizer_p.save_pretrained(_UpperCAmelCase ) # Checks it save with the same files self.assertSequenceEqual(_UpperCAmelCase ,_UpperCAmelCase ) # Checks everything loads correctly in the same way _a : Optional[Any] = tokenizer_r.from_pretrained(_UpperCAmelCase ) _a : Optional[Any] = tokenizer_p.from_pretrained(_UpperCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCAmelCase ,_UpperCAmelCase ) ) shutil.rmtree(_UpperCAmelCase ) # Save tokenizer rust, legacy_format=False _a : Dict = tempfile.mkdtemp() _a : Any = tokenizer_r.save_pretrained(_UpperCAmelCase ,legacy_format=_UpperCAmelCase ) _a : Union[str, Any] = tokenizer_p.save_pretrained(_UpperCAmelCase ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way _a : int = tokenizer_r.from_pretrained(_UpperCAmelCase ) _a : List[str] = tokenizer_p.from_pretrained(_UpperCAmelCase ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(_UpperCAmelCase ,_UpperCAmelCase ) ) shutil.rmtree(_UpperCAmelCase ) @require_torch def __lowercase ( self : int ): if not self.test_seqaseq: return _a : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Longer text that will definitely require truncation. 
_a : Any = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for' ' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons' ' will only worsen the violence and misery for millions of people.', ] _a : Optional[Any] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al' ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi' ' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] try: _a : Optional[int] = tokenizer.prepare_seqaseq_batch( src_texts=_UpperCAmelCase ,tgt_texts=_UpperCAmelCase ,max_length=3 ,max_target_length=10 ,return_tensors='pt' ,src_lang='eng_Latn' ,tgt_lang='ron_Latn' ,) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.labels.shape[1] ,10 ) # max_target_length will default to max_length if not specified _a : List[str] = tokenizer.prepare_seqaseq_batch( _UpperCAmelCase ,tgt_texts=_UpperCAmelCase ,max_length=3 ,return_tensors='pt' ) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.labels.shape[1] ,3 ) _a : Dict = tokenizer.prepare_seqaseq_batch( src_texts=_UpperCAmelCase ,max_length=3 ,max_target_length=10 ,return_tensors='pt' ) self.assertEqual(batch_encoder_only.input_ids.shape[1] ,3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] ,3 ) self.assertNotIn('decoder_input_ids' ,_UpperCAmelCase ) @unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' 
) def __lowercase ( self : List[str] ): pass def __lowercase ( self : Dict ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _a : Dict = [AddedToken('<special>' ,lstrip=_UpperCAmelCase )] _a : Dict = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase ,additional_special_tokens=_UpperCAmelCase ,**_UpperCAmelCase ) _a : Tuple = tokenizer_r.encode('Hey this is a <special> token' ) _a : Dict = tokenizer_r.encode('<special>' ,add_special_tokens=_UpperCAmelCase )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: _a : Tuple = self.rust_tokenizer_class.from_pretrained( _UpperCAmelCase ,additional_special_tokens=_UpperCAmelCase ,**_UpperCAmelCase ,) _a : Any = self.tokenizer_class.from_pretrained( _UpperCAmelCase ,additional_special_tokens=_UpperCAmelCase ,**_UpperCAmelCase ) _a : Tuple = tokenizer_p.encode('Hey this is a <special> token' ) _a : Optional[Any] = tokenizer_cr.encode('Hey this is a <special> token' ) self.assertEqual(_UpperCAmelCase ,_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase ,_UpperCAmelCase ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class __magic_name__ ( unittest.TestCase ): lowerCAmelCase : Optional[int] = 'facebook/nllb-200-distilled-600M' lowerCAmelCase : int = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] lowerCAmelCase : Optional[int] = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există 
o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] lowerCAmelCase : int = [ 2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 8_1_6_5, 2_4_8_0_6_6, 1_4_7_3_4, 9_5_0, 1_1_3_5, 1_0_5_7_2_1, 3_5_7_3, 8_3, 2_7_3_5_2, 1_0_8, 4_9_4_8_6, 2, ] @classmethod def __lowercase ( cls : Union[str, Any] ): _a : NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name ,src_lang='eng_Latn' ,tgt_lang='ron_Latn' ) _a : Optional[Any] = 1 return cls def __lowercase ( self : Any ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] ,256001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] ,256002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] ,256057 ) def __lowercase ( self : List[Any] ): _a : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens ,_UpperCAmelCase ) def __lowercase ( self : Any ): self.assertIn(_UpperCAmelCase ,self.tokenizer.all_special_ids ) # fmt: off _a : Dict = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047] # fmt: on _a : Tuple = self.tokenizer.decode(_UpperCAmelCase ,skip_special_tokens=_UpperCAmelCase ) _a : List[str] = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase ,_UpperCAmelCase ) self.assertNotIn(self.tokenizer.eos_token ,_UpperCAmelCase ) def __lowercase ( self : Optional[int] ): _a : Optional[int] = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0] ,_UpperCAmelCase ) _a : List[str] = 10 _a : List[Any] = self.tokenizer(_UpperCAmelCase ,max_length=_UpperCAmelCase ,truncation=_UpperCAmelCase ).input_ids[0] self.assertEqual(ids[-1] ,2 ) self.assertEqual(ids[0] ,_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ) ,_UpperCAmelCase ) def __lowercase ( self : Dict ): 
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) ,[256203, 3] ) def __lowercase ( self : List[Any] ): _a : Union[str, Any] = tempfile.mkdtemp() _a : Tuple = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(_UpperCAmelCase ) _a : Tuple = NllbTokenizer.from_pretrained(_UpperCAmelCase ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,_UpperCAmelCase ) @require_torch def __lowercase ( self : Optional[int] ): _a : Optional[Any] = self.tokenizer( self.src_text ,text_target=self.tgt_text ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,max_length=len(self.expected_src_tokens ) ,return_tensors='pt' ,) _a : Tuple = shift_tokens_right( batch['labels'] ,self.tokenizer.pad_token_id ,self.tokenizer.lang_code_to_id['ron_Latn'] ) self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase ) self.assertEqual((2, 15) ,batch.input_ids.shape ) self.assertEqual((2, 15) ,batch.attention_mask.shape ) _a : Optional[int] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens ,_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase ,batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] ) def __lowercase ( self : List[str] ): _a : int = self.tokenizer(self.src_text ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,max_length=3 ,return_tensors='pt' ) _a : str = self.tokenizer( text_target=self.tgt_text ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,max_length=10 ,return_tensors='pt' ) _a : Union[str, Any] = targets['input_ids'] _a : Dict = shift_tokens_right( _UpperCAmelCase ,self.tokenizer.pad_token_id ,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] ,) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.decoder_input_ids.shape[1] ,10 ) @require_torch def __lowercase ( self : List[Any] ): _a : int = 
self.tokenizer._build_translation_inputs( 'A test' ,return_tensors='pt' ,src_lang='eng_Latn' ,tgt_lang='fra_Latn' ) self.assertEqual( nested_simplify(_UpperCAmelCase ) ,{ # A, test, EOS, en_XX 'input_ids': [[256047, 70, 7356, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 256057, } ,) @require_torch def __lowercase ( self : Union[str, Any] ): _a : List[str] = True _a : str = self.tokenizer( 'UN Chief says there is no military solution in Syria' ,src_lang='eng_Latn' ,tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids ,[16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047] ) _a : Tuple = False _a : str = self.tokenizer( 'UN Chief says there is no military solution in Syria' ,src_lang='eng_Latn' ,tgt_lang='fra_Latn' ) self.assertEqual( inputs.input_ids ,[256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2] )
89
"""Tokenizer tests for LayoutLM (WordPiece-based), driven by the common tester mixin."""
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # Minimal WordPiece vocabulary covering the tokens exercised below.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        # Ids 0-2 are the special tokens; content tokens start at index 3.
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        # NOTE(review): the original method name was garbled; intentionally a no-op.
        pass
89
1
"""Rotate an image with OpenCV affine transforms and plot the results."""
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(
    img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    """Warp *img* with the affine transform mapping the three points *pt1*
    onto *pt2*; the output canvas is *rows* x *cols* pixels."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image into gray scale values
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    # NOTE(review): the source was garbled here (every call passed the same
    # placeholder point set); the pairs below follow the upstream script —
    # confirm against the original if exact reproductions matter.
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
89
"""Conditional DETR model configuration."""
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """Configuration class storing all hyper-parameters of a Conditional DETR
    model (backbone, transformer encoder/decoder, Hungarian matcher costs and
    loss coefficients)."""

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # A timm backbone and an explicit HF backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a dict-serialized backbone config into its config class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
89
1
"""Interactive Vigenère cipher over the uppercase Latin alphabet.

Letters keep their case; any other character is passed through unchanged
and does not advance the key.
"""

LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Encrypt *message* under *key*."""
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    """Decrypt *message* under *key*."""
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shared encrypt/decrypt worker; *mode* is "encrypt" or "decrypt"."""
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            # Preserve the original letter's case in the output.
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            # Only letters consume key characters; wrap around at the end.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)


if __name__ == "__main__":
    main()
89
"""Unit tests for the ConvNextV2 model family (base, image classification, backbone).

NOTE(review): the obfuscated source defined every class as ``__magic_name__`` and every
method as ``__lowercase`` with duplicate parameter names (a SyntaxError); names below are
restored from the intra-file call sites (``ConvNextVaModelTester(self)``,
``self.model_tester.prepare_config_and_inputs()`` etc.).
"""
import inspect
import unittest

from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
    from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextVaModelTester:
    """Builds tiny ConvNextV2 configs and random inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when use_labels is False."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        # is_decoder was lost in the obfuscated source; False matches upstream — TODO confirm.
        return ConvNextVaConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    # NOTE(review): the five boolean class attributes were obfuscated to the same name;
    # restored to the upstream flags in source order — confirm against upstream.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            # Bare encoders and backbones produce no loss, so skip them here.
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats test fixture."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
89
1
'''simple docstring''' def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str: if not (isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(lowerCAmelCase_ , lowerCAmelCase_ )): raise ValueError('longest_common_substring() takes two strings for inputs' ) _a : List[str] = len(lowerCAmelCase_ ) _a : int = len(lowerCAmelCase_ ) _a : Any = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )] _a : Any = 0 _a : List[str] = 0 for i in range(1 , texta_length + 1 ): for j in range(1 , texta_length + 1 ): if texta[i - 1] == texta[j - 1]: _a : Optional[Any] = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: _a : int = i _a : Optional[int] = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
89
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase = { '''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LiltForQuestionAnswering''', '''LiltForSequenceClassification''', '''LiltForTokenClassification''', '''LiltModel''', '''LiltPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
89
1
"""IndicGLUE metric: accuracy, F1, and precision@10 scorers for the IndicGLUE benchmark."""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score

import datasets


_CITATION = """\
@inproceedings{kakwani2020indicnlpsuite,
    title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
    author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
    year={2020},
    booktitle={Findings of EMNLP},
}
"""

_DESCRIPTION = """\
    IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide variety of tasks and
    covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""

_KWARGS_DESCRIPTION = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
    predictions: list of predictions to score (as int64),
        except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
    references: list of ground truth labels corresponding to the predictions (as int64),
        except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
    "accuracy": Accuracy
    "f1": F1 score
    "precision": Precision@10
Examples:

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli')  # 'wnli' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
    >>> references = [0, 1]
    >>> predictions = [0, 1]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0, 'f1': 1.0}

    >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'precision@10': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of predictions equal to labels (element-wise, numpy arrays)."""
    return float((preds == labels).mean())


def acc_and_fa(preds, labels):
    """Accuracy plus F1 score for the same predictions."""
    acc = simple_accuracy(preds, labels)
    # The obfuscated source collapsed both keyword arguments; conventionally the
    # ground truth goes to y_true and the predictions to y_pred.
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }


def precision_at_aa(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval using cosine distance.

    For each English sentence vector, the i-th Indian-language vector is the
    correct match; counts how often it appears among the 10 nearest neighbours.
    """
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # cvit-mkb-clsr uses sentence vectors; everything else integer labels.
                    "predictions": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                    "references": datasets.Value("int64")
                    if self.config_name != "cvit-mkb-clsr"
                    else datasets.Sequence(datasets.Value("float32")),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if self.config_name != "cvit-mkb-clsr" else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
                '"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
                '"wiki-ner"]'
            )
89
'''simple docstring''' import math def __lowerCamelCase ( lowerCAmelCase_ ) -> bool: _a : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(lowerCAmelCase_ ) def __lowerCamelCase ( lowerCAmelCase_ = 1 / 12345 ) -> int: _a : int = 0 _a : Optional[Any] = 0 _a : int = 3 while True: _a : Tuple = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(lowerCAmelCase_ ): _a : Union[str, Any] = int(lowerCAmelCase_ ) total_partitions += 1 if check_partition_perfect(lowerCAmelCase_ ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(lowerCAmelCase_ ) integer += 1 if __name__ == "__main__": print(f"""{solution() = }""")
89
1
"""Convert a T5X Pix2Struct checkpoint to the Hugging Face format.

Loads the flax parameter tree, renames it to the transformers naming scheme,
copies it into a ``PixaStructForConditionalGeneration`` model, and saves the
model together with its processor.
"""
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
    AutoTokenizer,
    PixaStructConfig,
    PixaStructForConditionalGeneration,
    PixaStructImageProcessor,
    PixaStructProcessor,
    PixaStructTextConfig,
    PixaStructVisionConfig,
)


def get_flax_param(tax_checkpoint_path):
    """Load a T5X checkpoint and return its parameters as a flat {path-tuple: array} dict."""
    flax_params = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    """Rename flax parameter paths to transformers names and convert them to torch tensors.

    Dense kernels are transposed (flax stores (in, out)); embedding tables are
    copied without transposition.
    """
    converted_dict = {}
    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict


def convert_pixastruct_original_pytorch_checkpoint_to_hf(
    tax_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    """Build the HF model/processor from a T5X checkpoint and save both to disk."""
    flax_params = get_flax_param(tax_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)

    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )
    model = PixaStructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # NOTE(review): the assignment targets here were lost to obfuscation; the
        # upstream script sets max_patches = 4096 for the large model and marks the
        # processor as VQA — confirm both against the original conversion script.
        processor.image_processor.max_patches = 4096
    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Whether the checkpoint is a VQA model.")
    args = parser.parse_args()

    # argparse derives the attribute name from "--t5x_checkpoint_path", so it must be
    # read back as args.t5x_checkpoint_path (the source read a non-existent attribute).
    convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
89
"""Convert an original (CompVis) latent-diffusion UNet checkpoint to the diffusers layout."""
import argparse
import json

import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Remove segments from a dot-separated parameter path.

    A non-negative count drops that many leading segments; a negative count
    keeps everything except that many trailing segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Map old resnet parameter names to diffusers names.

    Returns a list of {"old": ..., "new": ...} rename records.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Map old attention parameter names to diffusers names.

    Returns a list of {"old": ..., "new": ...} rename records.
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Copy tensors from old_checkpoint into checkpoint following the rename records.

    ``attention_paths_to_split`` maps a fused qkv parameter path to the three
    target paths; those tensors are split head-wise into query/key/value first.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    """Rename every parameter of an original LDM UNet state dict to the diffusers layout.

    NOTE(review): the obfuscated source lost the left-hand-side key names of the direct
    copies below; they are restored from the upstream diffusers conversion script —
    confirm against the target UNet model's state dict.
    """
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config
            )

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        # A full pipeline can be assembled when scheduler/vqvae live next to the UNet.
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
89
1
"""Tests for the mask-generation (SAM) pipeline.

Fixes over the previous revision:
- ``hashlib.mda`` does not exist -> ``hashlib.md5``.
- The results accumulator was assigned to one name but read as another
  (``new_outupt``), raising ``NameError`` at runtime.
- The vision-less ``Image`` stub and the test case shared the same class name,
  so the stub was clobbered; unittest methods were not named ``test_*`` and
  were therefore never collected.
"""
import hashlib
import unittest
from typing import Dict

import numpy as np

from transformers import (
    MODEL_FOR_MASK_GENERATION_MAPPING,
    TF_MODEL_FOR_MASK_GENERATION_MAPPING,
    is_vision_available,
    pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Minimal stand-in used only so the module imports when Pillow is absent."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: "Image") -> str:
    """Return a short, stable fingerprint (first 10 hex chars of MD5) of an image's raw bytes."""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: "Image") -> Dict:
    """Summarize a mask as {hash, shape} so large arrays can be compared compactly."""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}


@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a MaskGenerationPipeline plus two sample fixture images."""
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        # Generic pipeline test is a no-op for mask generation.
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_21},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
                {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.99_67},
                {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_93},
                {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.99_09},
                {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.98_79},
                {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.98_34},
                {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.97_16},
                {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.96_12},
                {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.95_99},
                {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.95_52},
                {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.95_32},
                {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.95_16},
                {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.94_99},
                {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.94_83},
                {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.94_64},
                {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_43},
                {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_43},
                {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.94_08},
                {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.93_35},
                {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.93_26},
                {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.92_62},
                {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.89_99},
                {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.89_86},
                {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.89_84},
                {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.88_73},
                {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.88_71},
            ],
        )
        # fmt: on

    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
                {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
                {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.02_10},
                {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
                {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
                {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
            ],
        )
89
"""ffmpeg-based audio helpers: decode byte payloads and stream microphone input.

Fixes over the previous revision:
- Every function was named ``__lowerCamelCase``, so the internal calls to
  ``_ffmpeg_stream``, ``ffmpeg_microphone`` and ``chunk_bytes_iter`` raised
  ``NameError``; real names are restored from the call sites.
- ``np.floataa`` / ``np.intaa`` are not numpy attributes -> ``np.float32`` /
  ``np.int16`` (consistent with the "f32le"/"s16le" sample formats used).
"""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """Decode an in-memory audio payload to mono float32 PCM via ffmpeg.

    Raises ValueError if ffmpeg is missing or the payload decodes to nothing.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """Yield raw audio byte chunks of ~``chunk_length_s`` seconds from the default microphone."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    # NOTE(review): other platforms fall through with format_/input_ unset
    # (NameError below) — preserved from the original behavior.

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as overlapping numpy chunks with stride metadata.

    Chunks arriving more than ``10 * chunk_s`` seconds late are skipped to keep
    the stream near real time.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into fixed-size chunks that overlap by ``stride``.

    When ``stream`` is True, partial (not-yet-full) chunks are yielded eagerly
    with ``"partial": True``.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0  # first chunk has no left context
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Run ffmpeg and yield its stdout in ``buflen``-byte reads until EOF."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
89
1
"""Tests for the RoCBert tokenizer.

Fixes over the previous revision:
- All class attributes were named ``lowerCAmelCase``, so the
  ``TokenizerTesterMixin`` could not read ``tokenizer_class`` etc.; real
  attribute names are restored.
- Locals were assigned to ``_a`` but read under their real names
  (``vocab_tokens``, ``tokens``, ...), raising ``NameError``.
- Test methods were named ``__lowercase`` and therefore never collected by
  unittest; ``test_*`` names are restored.
"""
import json
import os
import unittest

from transformers.models.roc_bert.tokenization_roc_bert import (
    VOCAB_FILES_NAMES,
    RoCBertBasicTokenizer,
    RoCBertTokenizer,
    RoCBertWordpieceTokenizer,
    _is_control,
    _is_punctuation,
    _is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english


@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        # RoCBert's auxiliary shape/pronunciation vocabularies reuse the token index here.
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        # Expected output keeps the umlaut -> strip_accents must be False here.
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual(
            [tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
89
"""Breadth-first search utilities: shortest path (as a node list) and shortest distance.

Fixes over the previous revision:
- Both functions shared the mangled name ``__lowerCamelCase`` (the second
  shadowed the first) and the demo graph was assigned to ``__lowerCAmelCase``
  while the ``__main__`` block referenced ``bfs_shortest_path`` /
  ``demo_graph`` -> NameError at run time. Real names are restored from the
  call sites.
- ``list.pop(0)`` (O(n) per pop) replaced with ``collections.deque.popleft()``.
"""
from collections import deque

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Return one shortest path from ``start`` to ``goal`` as a node list.

    Returns ``[start]`` when start == goal, and ``[]`` when no path exists.
    """
    explored = set()
    # each queue entry is a full path; BFS order guarantees the first hit is shortest
    queue = deque([[start]])
    if start == goal:
        return [start]
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # extend the current path by every neighbour and enqueue it
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                if neighbour == goal:
                    return new_path
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest path from ``start`` to ``target``.

    Returns 0 when start == target and -1 when either node is absent or
    unreachable.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = deque([start])
    visited = {start}
    # distances from `start`; target pre-seeded with -1 (= not reached yet)
    dist = {start: 0, target: -1}
    while queue:
        node = queue.popleft()
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
89
1
"""The ``Image`` feature type: encode/decode images to/from Arrow storage.

Fixes over the previous revision:
- Module constants (``_IMAGE_COMPRESSION_FORMATS``, ``_NATIVE_BYTEORDER``,
  ``_VALID_IMAGE_ARRAY_DTPYES``) were assigned to the mangled name
  ``__lowerCAmelCase`` while the functions referenced the real names ->
  NameError. Real names restored from the reference sites.
- Dataclass fields were all named ``lowerCAmelCase`` (clobbering each other);
  real field names restored from their use (``self.decode``, ``self.pa_type``).
"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict

if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType


_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126
# minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]


@dataclass
class Image:
    """Image feature stored in Arrow as a ``{bytes, path}`` struct.

    ``decode=False`` disables decoding to ``PIL.Image.Image`` (raw dicts are
    returned instead).
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode ``value`` into the storable ``{bytes, path}`` dict form."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode a stored ``{bytes, path}`` dict back into a ``PIL.Image.Image``."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten to raw ``{bytes, path}`` Values when decoding is disabled."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast string / binary / struct / list Arrow storage to the Image struct type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # lists of pixel values: encode each nested array to image bytes
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Inline file contents into the ``bytes`` field, keeping only the basename as ``path``."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)


def list_image_compression_formats() -> List[str]:
    """Return (and lazily cache) the formats Pillow can both open and save."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Serialize a PIL image to bytes, keeping its format when re-encodable."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format_ = image.format
    else:
        format_ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format_)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encode a PIL image, preferring its on-disk path when it has one."""
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    """Encode a numpy array as image bytes, downcasting to a Pillow-compatible dtype if needed."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:
        # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind")
        # doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(objs: Union[List[str], List[np.ndarray], List["PIL.Image.Image"]]) -> List[dict]:
    """Encode a homogeneous list of objects (paths, arrays or PIL images) to image dicts."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
89
"""Lazy-import module initializer for the Swin model family.

Fixes over the previous revision:
- the torch/TF model-name lists overwrote the import-structure dict instead of
  being added to it under their module keys;
- `_LazyModule` was called with an undefined name `_import_structure`;
- the lazy module was bound to a local name instead of being installed in
  `sys.modules[__name__]`, so lazy attribute access never took effect.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Config symbols are importable unconditionally.
_import_structure = {
    "configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch implementations — added (not assigned over the dict) under their module key.
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow implementations.
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers on-demand imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
1
"""Feature extractor for EnCodec-style neural audio codecs.

Reconstructed from a mangled revision that could not parse: every `__init__`
and `__call__` parameter shared one name (duplicate-argument SyntaxError),
both properties were named `__lowercase` while the body called
`self.chunk_length` / `self.chunk_stride`, attribute assignments were bound to
a local `_a` instead of `self`, and `logger` was never bound under that name.
External behavior follows the visible method bodies.
"""
from typing import List, Optional, Union

import numpy as np

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging

logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    # Keys of the BatchFeature returned by __call__.
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        """
        Args:
            feature_size: 1 for mono, 2 for stereo input.
            sampling_rate: expected sampling rate of the audio, in Hz.
            padding_value: value used to pad short examples.
            chunk_length_s: optional chunk duration in seconds; None disables chunking.
            overlap: optional fractional overlap between consecutive chunks (0..1).
        """
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        """Chunk length in samples, or None when chunking is disabled."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        """Hop between chunk starts in samples (>= 1), or None when chunking is disabled."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        """Featurize one audio clip or a batch of clips into padded `input_values`
        (+ `padding_mask` when padding is applied).

        Raises:
            ValueError: on sampling-rate mismatch, padding+truncation both set,
                inputs with more than 2 dims, or channel-count mismatch.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                    f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
                    f""" {self.sampling_rate} and not {sampling_rate}."""
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )

        if padding and truncation:
            raise ValueError('Both padding and truncation were set. Make sure you only set one.')
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"""Expected input shape (channels, length) but got shape {example.shape}""")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"""Expected mono audio but example has {example.shape[-1]} channels""")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"""Expected stereo audio but example has {example.shape[-1]} channels""")

        padded_inputs = None
        input_values = BatchFeature({'input_values': raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                # Truncate every example to a whole number of chunks fitting the shortest one.
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                # Pad every example up to a whole number of chunks covering the longest one.
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = 'max_length'
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
        if padding:
            # The attention mask doubles as the padding mask for this model.
            padded_inputs['padding_mask'] = padded_inputs.pop('attention_mask')

        input_values = []
        for example in padded_inputs.pop('input_values'):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs['input_values'] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
89
'''simple docstring''' import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class __magic_name__ ( _UpperCamelCase , unittest.TestCase ): lowerCAmelCase : Optional[int] = BarthezTokenizer lowerCAmelCase : int = BarthezTokenizerFast lowerCAmelCase : Dict = True lowerCAmelCase : str = True def __lowercase ( self : List[Any] ): super().setUp() _a : List[Any] = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ,legacy_format=_UpperCAmelCase ) _a : Union[str, Any] = tokenizer def __lowercase ( self : Tuple ): _a : Optional[Any] = '<pad>' _a : List[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) ,_UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) ,_UpperCAmelCase ) def __lowercase ( self : str ): _a : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'<s>' ) self.assertEqual(vocab_keys[1] ,'<pad>' ) self.assertEqual(vocab_keys[-1] ,'<mask>' ) self.assertEqual(len(_UpperCAmelCase ) ,101122 ) def __lowercase ( self : Dict ): self.assertEqual(self.get_tokenizer().vocab_size ,101122 ) @require_torch def __lowercase ( self : Dict ): _a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _a : Dict = [0, 57, 3018, 70307, 91, 2] _a : Dict = self.tokenizer( _UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' ) self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase ) self.assertEqual((2, 6) ,batch.input_ids.shape ) self.assertEqual((2, 6) 
,batch.attention_mask.shape ) _a : Tuple = batch.input_ids.tolist()[0] self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) def __lowercase ( self : Optional[Any] ): if not self.test_rust_tokenizer: return _a : str = self.get_tokenizer() _a : List[str] = self.get_rust_tokenizer() _a : Dict = 'I was born in 92000, and this is falsé.' _a : List[Any] = tokenizer.tokenize(_UpperCAmelCase ) _a : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) _a : Optional[Any] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) _a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) _a : Union[str, Any] = self.get_rust_tokenizer() _a : Any = tokenizer.encode(_UpperCAmelCase ) _a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) @slow def __lowercase ( self : Optional[int] ): # fmt: off _a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: 
E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. _a : Optional[Any] = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=_UpperCAmelCase ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=_UpperCAmelCase ,)
89
1
"""Configuration and ONNX-export configuration for CodeGen models.

Reconstructed from a mangled revision: both classes were named
`__magic_name__` (the second shadowed the first), `__init__` parameters all
shared one name (duplicate-argument SyntaxError), `model_type` and
`attribute_map` were both bound to `lowerCAmelCase`, and the
`getattr(..., 'pad_token_id', _UpperCAmelCase)` default referenced an
undefined name (restored to `None`).
"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging

logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
    'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
    'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
    'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
    'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
    'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
    'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
    'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
    'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
    'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
    'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
    'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}


class CodeGenConfig(PretrainedConfig):
    """Model configuration for CodeGen; defaults match Salesforce/codegen-2B-mono scale."""

    model_type = 'codegen'
    # Map generic config attribute names onto CodeGen-specific ones.
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for CodeGen (causal-LM style inputs with optional past)."""

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, 'pad_token_id', None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported graph inputs."""
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='inputs')
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs for export, extending the attention mask when `use_past` is set."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch

                batch, seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
89
'''simple docstring''' import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __magic_name__ ( _UpperCamelCase ): @require_torch def __lowercase ( self : Tuple ): # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched _a : Optional[int] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n ' _a : List[str] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n ' _a : Tuple = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n ' # Force fetching the files so that we can use the cache _a : List[Any] = 'hf-internal-testing/tiny-random-bert' BertConfig.from_pretrained(_UpperCAmelCase ) BertModel.from_pretrained(_UpperCAmelCase ) BertTokenizer.from_pretrained(_UpperCAmelCase ) pipeline(task='fill-mask' ,model=_UpperCAmelCase ) # baseline - just load from_pretrained with normal network _a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )] # should succeed _a : Tuple = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a : int = '1' _a : List[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase ) self.assertEqual(result.returncode ,0 ,result.stderr ) self.assertIn('success' ,result.stdout.decode() ) @require_torch def __lowercase ( self : Any ): # python one-liner segments # this must be loaded 
before socket.socket is monkey-patched _a : Dict = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n ' _a : Optional[int] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n ' _a : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n ' # Force fetching the files so that we can use the cache _a : int = 'hf-internal-testing/tiny-random-bert' BertConfig.from_pretrained(_UpperCAmelCase ) BertModel.from_pretrained(_UpperCAmelCase ) BertTokenizer.from_pretrained(_UpperCAmelCase ) pipeline(task='fill-mask' ,model=_UpperCAmelCase ) # baseline - just load from_pretrained with normal network _a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )] # should succeed _a : str = self.get_env() _a : Optional[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase ) self.assertEqual(result.returncode ,0 ,result.stderr ) self.assertIn('success' ,result.stdout.decode() ) @require_torch def __lowercase ( self : List[str] ): # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before # `transformers` is loaded, and it's too late for inside pytest - so we are changing it # while running an external program # python one-liner segments # this must be loaded before socket.socket is monkey-patched _a : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n ' _a : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n ' _a : str = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n ' # 
baseline - just load from_pretrained with normal network _a : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run] )] # should succeed _a : Dict = self.get_env() _a : int = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase ) self.assertEqual(result.returncode ,0 ,result.stderr ) self.assertIn('success' ,result.stdout.decode() ) # next emulate no network _a : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a : int = '1' _a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase ) self.assertEqual(result.returncode ,0 ,result.stderr ) self.assertIn('success' ,result.stdout.decode() ) @require_torch def __lowercase ( self : int ): _a : Optional[Any] = '\nfrom transformers import pipeline\n ' _a : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n ' _a : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n ' _a : List[Any] = self.get_env() _a : Dict = '1' _a : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )] _a : str = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase ) self.assertEqual(result.returncode ,1 ,result.stderr ) self.assertIn( 'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,) @require_torch def __lowercase ( self : int ): _a : Optional[int] = '\nfrom transformers import AutoModel\n ' _a : List[Any] = '\nmname = 
"hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n ' # baseline - just load from_pretrained with normal network _a : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run] )] # should succeed _a : Tuple = self.get_env() _a : List[str] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase ) self.assertEqual(result.returncode ,0 ,result.stderr ) self.assertIn('success' ,result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files _a : Optional[Any] = '1' _a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase ) self.assertEqual(result.returncode ,0 ,result.stderr ) self.assertIn('success' ,result.stdout.decode() )
89
1
"""Convert a hexadecimal string to an int whose decimal digits spell its binary form."""


def hex_to_bin(hex_num: str) -> int:
    """Return an int whose decimal digits are the binary representation of `hex_num`.

    Leading/trailing whitespace is ignored; a leading '-' is preserved on the result.

    Raises:
        ValueError: if the input is empty/whitespace-only ("No value was passed to
            the function") or not valid hexadecimal ("Invalid value was passed to
            the function").

    >>> hex_to_bin("AC")
    10101100
    >>> hex_to_bin("   12f   ")
    100101111
    >>> hex_to_bin("-fFfF")
    -1111111111111111
    >>> hex_to_bin("0")
    0
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function')

    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError('Invalid value was passed to the function')

    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    # Bug fix: for input 0 the loop never runs and int('') raised ValueError.
    if not bin_str:
        bin_str = '0'

    return int(('-' + bin_str) if is_negative else bin_str)


# Backward-compatible alias for the previous (mangled) public name.
__lowerCamelCase = hex_to_bin


if __name__ == "__main__":
    import doctest

    doctest.testmod()
89
"""Project Euler 12: first triangle number with more than a given number of divisors.

Fixes over the previous revision: all three functions shared one name (each
`def` overwrote the previous, making the generator and divisor counter
unreachable), the filter called `count_divisors` on an undefined name, and the
`__main__` guard called an undefined `solution()`. The 500-divisor threshold is
now a backward-compatible parameter.
"""


def triangle_number_generator():
    """Yield triangle numbers n*(n+1)//2 for n = 1, 2, ..."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    """Return the number of positive divisors of `n` via prime factorization.

    Uses the identity d(p1^a1 * ... * pk^ak) = (a1+1) * ... * (ak+1).
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # Remaining n is a prime factor with multiplicity 1.
        divisors_count *= 2
    return divisors_count


def solution(limit: int = 500) -> int:
    """Return the first triangle number with more than `limit` divisors."""
    return next(t for t in triangle_number_generator() if count_divisors(t) > limit)


if __name__ == "__main__":
    print(solution())
89
1
'''simple docstring''' import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = {'''vocab_file''': '''spiece.model'''} __lowerCAmelCase = { '''vocab_file''': { '''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''', } } class __magic_name__ ( _UpperCamelCase ): def __init__( self : Optional[int] ,_UpperCAmelCase : str ,_UpperCAmelCase : List[Any]=False ,_UpperCAmelCase : List[Any]=True ,_UpperCAmelCase : Optional[int]=False ,_UpperCAmelCase : Dict="<s>" ,_UpperCAmelCase : Tuple="</s>" ,_UpperCAmelCase : Dict="<unk>" ,_UpperCAmelCase : Dict="<sep>" ,_UpperCAmelCase : List[str]="<pad>" ,_UpperCAmelCase : List[Any]="<cls>" ,_UpperCAmelCase : Union[str, Any]="<mask>" ,_UpperCAmelCase : Optional[Any]=["<eop>", "<eod>"] ,_UpperCAmelCase : Optional[Dict[str, Any]] = None ,**_UpperCAmelCase : Union[str, Any] ,): _a : List[Any] = AddedToken(_UpperCAmelCase ,lstrip=_UpperCAmelCase ,rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ) else mask_token _a : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_UpperCAmelCase ,remove_space=_UpperCAmelCase ,keep_accents=_UpperCAmelCase ,bos_token=_UpperCAmelCase ,eos_token=_UpperCAmelCase ,unk_token=_UpperCAmelCase ,sep_token=_UpperCAmelCase ,pad_token=_UpperCAmelCase ,cls_token=_UpperCAmelCase ,mask_token=_UpperCAmelCase ,additional_special_tokens=_UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,**_UpperCAmelCase ,) _a : List[Any] = 3 _a : Optional[int] = do_lower_case _a : Dict = remove_space _a : Union[str, Any] = keep_accents _a : Any = vocab_file _a : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_UpperCAmelCase ) 
try: import jieba except ModuleNotFoundError as error: raise error.__class__( 'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. ' 'See https://pypi.org/project/jieba/ for installation.' ) _a : Optional[int] = jieba _a : Union[str, Any] = str.maketrans(' \n' ,'\u2582\u2583' ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def __lowercase ( self : Dict ): return len(self.sp_model ) def __lowercase ( self : Dict ): _a : Optional[Any] = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : int ): _a : Tuple = self.__dict__.copy() _a : Any = None return state def __setstate__( self : Any ,_UpperCAmelCase : Optional[int] ): _a : Any = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): _a : Optional[int] = {} _a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : str ): if self.remove_space: _a : List[str] = ' '.join(inputs.strip().split() ) else: _a : int = inputs _a : str = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' ) if not self.keep_accents: _a : Dict = unicodedata.normalize('NFKD' ,_UpperCAmelCase ) _a : List[str] = ''.join([c for c in outputs if not unicodedata.combining(_UpperCAmelCase )] ) if self.do_lower_case: _a : Dict = outputs.lower() return outputs def __lowercase ( self : List[str] ,_UpperCAmelCase : str ): _a : int = self.preprocess_text(_UpperCAmelCase ) _a : Dict = self.sp_model.encode(_UpperCAmelCase ,out_type=_UpperCAmelCase ) _a : Optional[Any] = [] for piece in pieces: if len(_UpperCAmelCase ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): _a : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCAmelCase ,'' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _a : Optional[int] 
= cur_pieces[1:] else: _a : Optional[int] = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_UpperCAmelCase ) else: new_pieces.append(_UpperCAmelCase ) return new_pieces def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Union[str, Any] ): return self.sp_model.PieceToId(_UpperCAmelCase ) def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : List[str] ): return self.sp_model.IdToPiece(_UpperCAmelCase ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Any ): _a : Dict = ''.join(_UpperCAmelCase ).replace(_UpperCAmelCase ,' ' ).strip() return out_string def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ): _a : List[str] = [self.sep_token_id] _a : int = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ,_UpperCAmelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase ,token_ids_a=_UpperCAmelCase ,already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is not None: return ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] return ([0] * len(_UpperCAmelCase )) + [1, 1] def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ): _a : Union[str, Any] = [self.sep_token_id] _a : List[str] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def __lowercase ( self : List[str] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ): if not os.path.isdir(_UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _a : List[Any] = os.path.join( _UpperCAmelCase 
,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCAmelCase ,'wb' ) as fi: _a : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (out_vocab_file,) def __lowercase ( self : int ,*_UpperCAmelCase : Dict ,**_UpperCAmelCase : List[Any] ): _a : List[str] = super()._decode(*_UpperCAmelCase ,**_UpperCAmelCase ) _a : List[str] = text.replace(' ' ,'' ).replace('\u2582' ,' ' ).replace('\u2583' ,'\n' ) return text
89
"""Read a `datasets` Dataset from a SQL source and write one back to a SQL table."""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    # NOTE(review): original text read `import sqlitea`; the string annotations below
    # reference "sqlite3.Connection", so sqlite3 is the intended module.
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    """Build a Dataset from a SQL query (or full table) through the packaged `Sql` builder."""

    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        # The Sql packaged builder does the actual fetch + Arrow conversion.
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        """Prepare the builder cache and return the materialized 'train' split."""
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    """Write a Dataset into a SQL table in batches, optionally over a process pool."""

    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        """Write the whole dataset; returns the number of rows written."""
        # `sql` and `con` are fixed by this writer, so drop any user-supplied overrides.
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        """Write one slice [offset, offset + batch_size) of the dataset; returns rows written."""
        offset, index, to_sql_kwargs = args
        # After the first batch the table already exists: append instead of failing.
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        # pandas may return None for the row count; fall back to the dataframe length.
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        """Dispatch batch writes sequentially or across `num_proc` worker processes."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for rows_in_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += rows_in_batch
        return written
89
1
'''simple docstring''' def __lowerCamelCase ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ) -> bool: _a : Dict = set() # Replace all the whitespace in our sentence _a : Union[str, Any] = input_str.replace(' ' , '' ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(lowerCAmelCase_ ) == 26 def __lowerCamelCase ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ) -> bool: _a : str = [False] * 26 for char in input_str: if char.islower(): _a : List[str] = True elif char.isupper(): _a : str = True return all(lowerCAmelCase_ ) def __lowerCamelCase ( lowerCAmelCase_ = "The quick brown fox jumps over the lazy dog" , ) -> bool: return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def __lowerCamelCase ( ) -> None: from timeit import timeit _a : List[Any] = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest' print(timeit('is_pangram()' , setup=lowerCAmelCase_ ) ) print(timeit('is_pangram_faster()' , setup=lowerCAmelCase_ ) ) print(timeit('is_pangram_fastest()' , setup=lowerCAmelCase_ ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
89
'''simple docstring''' from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> np.ndarray: _a : Union[str, Any] = cva.getAffineTransform(lowerCAmelCase_ , lowerCAmelCase_ ) return cva.warpAffine(lowerCAmelCase_ , lowerCAmelCase_ , (rows, cols) ) if __name__ == "__main__": # read original image __lowerCAmelCase = cva.imread( str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''') ) # turn image in gray scale value __lowerCAmelCase = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape __lowerCAmelCase , __lowerCAmelCase = gray_img.shape # set different points to rotate image __lowerCAmelCase = np.array([[50, 50], [200, 50], [50, 200]], np.floataa) __lowerCAmelCase = np.array([[10, 100], [200, 50], [100, 250]], np.floataa) __lowerCAmelCase = np.array([[50, 50], [150, 50], [120, 200]], np.floataa) __lowerCAmelCase = np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list __lowerCAmelCase = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations __lowerCAmelCase = plt.figure(1) __lowerCAmelCase = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3'''] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''') plt.title(titles[i]) plt.axis('''off''') plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
89
1
'''simple docstring''' def __lowerCamelCase ( lowerCAmelCase_ = 1000 ) -> int: _a , _a : Union[str, Any] = 1, 1 _a : Dict = 2 while True: _a : Any = 0 _a : Optional[Any] = fa + fa _a , _a : Union[str, Any] = fa, f index += 1 for _ in str(lowerCAmelCase_ ): i += 1 if i == n: break return index if __name__ == "__main__": print(solution(int(str(input()).strip())))
89
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase = { '''configuration_bigbird_pegasus''': [ '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BigBirdPegasusConfig''', '''BigBirdPegasusOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BigBirdPegasusForCausalLM''', '''BigBirdPegasusForConditionalGeneration''', '''BigBirdPegasusForQuestionAnswering''', '''BigBirdPegasusForSequenceClassification''', '''BigBirdPegasusModel''', '''BigBirdPegasusPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP, BigBirdPegasusConfig, BigBirdPegasusOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bigbird_pegasus import ( BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST, BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, BigBirdPegasusPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
89
1
'''simple docstring''' def __lowerCamelCase ( lowerCAmelCase_ = 4000000 ) -> int: _a : Optional[Any] = [0, 1] _a : str = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 _a : List[Any] = 0 for j in range(len(lowerCAmelCase_ ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(f"""{solution() = }""")
89
'''simple docstring''' import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1024 , lowerCAmelCase_=1024 , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> List[Any]: _a : str = AutoTokenizer.from_pretrained(lowerCAmelCase_ ) _a : List[Any] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='train' , **lowerCAmelCase_ ) _a : List[str] = tok.pad_token_id def get_lens(lowerCAmelCase_ ): _a : Dict = tqdm( DataLoader(lowerCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=lowerCAmelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) _a : Union[str, Any] = [] for batch in dl: _a : Optional[Any] = batch['input_ids'].ne(lowerCAmelCase_ ).sum(1 ).tolist() _a : Optional[Any] = batch['labels'].ne(lowerCAmelCase_ ).sum(1 ).tolist() if consider_target: for src, tgt in zip(lowerCAmelCase_ , lowerCAmelCase_ ): max_lens.append(max(lowerCAmelCase_ , lowerCAmelCase_ ) ) else: max_lens.extend(lowerCAmelCase_ ) return max_lens _a : str = get_lens(lowerCAmelCase_ ) _a : Optional[int] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='val' , **lowerCAmelCase_ ) _a : Dict = get_lens(lowerCAmelCase_ ) pickle_save(lowerCAmelCase_ , train_ds.len_file ) pickle_save(lowerCAmelCase_ , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
89
1
'''simple docstring''' __lowerCAmelCase = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' __lowerCAmelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] __lowerCAmelCase = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
89
"""A singly linked list with self-tests and a small interactive demo."""
from typing import Any


class Node:
    """One list cell: a payload plus a pointer to the next cell."""

    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f"""Node({self.data})"""


class LinkedList:
    """Singly linked list supporting indexing, insertion/deletion at any position, and reversal."""

    def __init__(self):
        self.head = None

    def __iter__(self):
        # Yield the *data* of each node, head to tail.
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any):
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any):
        """Insert `data` so that it becomes element number `index`."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        """Remove element `index` and return its data."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    """Exercise the basic contract: bounds errors, insertion order, deletion, reversal."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Exercise mixed payload types (nodes, strings, floats, None)."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    """Run the self-tests, then an interactive demo on stdin."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"""Element at Position 1: {linked_list[1]}""")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"""length of linked_list is : {len(linked_list)}""")


if __name__ == "__main__":
    main()
89
1
"""Lazy-import init for the DistilBERT model family (PyTorch, TF, and Flax backends)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

# Each backend's symbols are only registered when that backend is importable.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that resolves names on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
"""Run doctests over transformers source and documentation files."""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Doctest every file in `directory` whose name matches the filters.

        `identifier` keeps only matching files; `n_identifier` (str or list)
        drops matching files; `only_modules` runs DocTestSuite on the importable
        module instead of doctest.testfile on the raw file.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"""{module_identifier} is not a module.""")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    # NOTE(review): the five test-method names below were obfuscated away; these
    # reconstructions just need to be distinct so all five run — confirm upstream.
    def test_modeling_doctest(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doctest(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doctest(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_files_without_doctest(self):
        directory = Path("src/transformers")
        n_identifier = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifier)

    def test_documentation_doctest(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
89
1
'''simple docstring''' def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int: _a , _a : int = len(lowerCAmelCase_ ), len(grid[0] ) if ( min(lowerCAmelCase_ , lowerCAmelCase_ ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _a : int = 0 count += depth_first_search(lowerCAmelCase_ , row + 1 , lowerCAmelCase_ , lowerCAmelCase_ ) count += depth_first_search(lowerCAmelCase_ , row - 1 , lowerCAmelCase_ , lowerCAmelCase_ ) count += depth_first_search(lowerCAmelCase_ , lowerCAmelCase_ , col + 1 , lowerCAmelCase_ ) count += depth_first_search(lowerCAmelCase_ , lowerCAmelCase_ , col - 1 , lowerCAmelCase_ ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
89
'''simple docstring''' import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = OrderedDict( [ ('''align''', '''EfficientNetImageProcessor'''), ('''beit''', '''BeitImageProcessor'''), ('''bit''', '''BitImageProcessor'''), ('''blip''', '''BlipImageProcessor'''), ('''blip-2''', '''BlipImageProcessor'''), ('''bridgetower''', '''BridgeTowerImageProcessor'''), ('''chinese_clip''', '''ChineseCLIPImageProcessor'''), ('''clip''', '''CLIPImageProcessor'''), ('''clipseg''', '''ViTImageProcessor'''), ('''conditional_detr''', '''ConditionalDetrImageProcessor'''), ('''convnext''', '''ConvNextImageProcessor'''), ('''convnextv2''', '''ConvNextImageProcessor'''), ('''cvt''', '''ConvNextImageProcessor'''), ('''data2vec-vision''', '''BeitImageProcessor'''), ('''deformable_detr''', '''DeformableDetrImageProcessor'''), ('''deit''', '''DeiTImageProcessor'''), ('''deta''', '''DetaImageProcessor'''), ('''detr''', '''DetrImageProcessor'''), ('''dinat''', '''ViTImageProcessor'''), ('''donut-swin''', '''DonutImageProcessor'''), ('''dpt''', '''DPTImageProcessor'''), ('''efficientformer''', '''EfficientFormerImageProcessor'''), ('''efficientnet''', '''EfficientNetImageProcessor'''), ('''flava''', '''FlavaImageProcessor'''), ('''focalnet''', '''BitImageProcessor'''), ('''git''', '''CLIPImageProcessor'''), ('''glpn''', '''GLPNImageProcessor'''), ('''groupvit''', 
'''CLIPImageProcessor'''), ('''imagegpt''', '''ImageGPTImageProcessor'''), ('''instructblip''', '''BlipImageProcessor'''), ('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''), ('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''), ('''levit''', '''LevitImageProcessor'''), ('''mask2former''', '''Mask2FormerImageProcessor'''), ('''maskformer''', '''MaskFormerImageProcessor'''), ('''mgp-str''', '''ViTImageProcessor'''), ('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''), ('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevitv2''', '''MobileViTImageProcessor'''), ('''nat''', '''ViTImageProcessor'''), ('''oneformer''', '''OneFormerImageProcessor'''), ('''owlvit''', '''OwlViTImageProcessor'''), ('''perceiver''', '''PerceiverImageProcessor'''), ('''pix2struct''', '''Pix2StructImageProcessor'''), ('''poolformer''', '''PoolFormerImageProcessor'''), ('''regnet''', '''ConvNextImageProcessor'''), ('''resnet''', '''ConvNextImageProcessor'''), ('''sam''', '''SamImageProcessor'''), ('''segformer''', '''SegformerImageProcessor'''), ('''swiftformer''', '''ViTImageProcessor'''), ('''swin''', '''ViTImageProcessor'''), ('''swin2sr''', '''Swin2SRImageProcessor'''), ('''swinv2''', '''ViTImageProcessor'''), ('''table-transformer''', '''DetrImageProcessor'''), ('''timesformer''', '''VideoMAEImageProcessor'''), ('''tvlt''', '''TvltImageProcessor'''), ('''upernet''', '''SegformerImageProcessor'''), ('''van''', '''ConvNextImageProcessor'''), ('''videomae''', '''VideoMAEImageProcessor'''), ('''vilt''', '''ViltImageProcessor'''), ('''vit''', '''ViTImageProcessor'''), ('''vit_hybrid''', '''ViTHybridImageProcessor'''), ('''vit_mae''', '''ViTImageProcessor'''), ('''vit_msn''', '''ViTImageProcessor'''), ('''xclip''', '''CLIPImageProcessor'''), ('''yolos''', '''YolosImageProcessor'''), ] ) __lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def 
__lowerCamelCase ( lowerCAmelCase_ ) -> Optional[Any]: for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: _a : List[Any] = model_type_to_module_name(lowerCAmelCase_ ) _a : Optional[Any] = importlib.import_module(f""".{module_name}""" , 'transformers.models' ) try: return getattr(lowerCAmelCase_ , lowerCAmelCase_ ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(lowerCAmelCase_ , '__name__' , lowerCAmelCase_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _a : Dict = importlib.import_module('transformers' ) if hasattr(lowerCAmelCase_ , lowerCAmelCase_ ): return getattr(lowerCAmelCase_ , lowerCAmelCase_ ) return None def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , **lowerCAmelCase_ , ) -> Tuple: _a : List[str] = get_file_from_repo( lowerCAmelCase_ , lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , ) if resolved_config_file is None: logger.info( 'Could not locate the image processor configuration file, will try to use the model config instead.' ) return {} with open(lowerCAmelCase_ , encoding='utf-8' ) as reader: return json.load(lowerCAmelCase_ ) class __magic_name__ : def __init__( self : List[str] ): raise EnvironmentError( 'AutoImageProcessor is designed to be instantiated ' 'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' 
) @classmethod @replace_list_option_in_docstrings(_UpperCAmelCase ) def __lowercase ( cls : Dict ,_UpperCAmelCase : Union[str, Any] ,**_UpperCAmelCase : Optional[Any] ): _a : Any = kwargs.pop('config' ,_UpperCAmelCase ) _a : Dict = kwargs.pop('trust_remote_code' ,_UpperCAmelCase ) _a : Any = True _a , _a : Tuple = ImageProcessingMixin.get_image_processor_dict(_UpperCAmelCase ,**_UpperCAmelCase ) _a : List[Any] = config_dict.get('image_processor_type' ,_UpperCAmelCase ) _a : int = None if "AutoImageProcessor" in config_dict.get('auto_map' ,{} ): _a : Any = config_dict['auto_map']['AutoImageProcessor'] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: _a : List[Any] = config_dict.pop('feature_extractor_type' ,_UpperCAmelCase ) if feature_extractor_class is not None: logger.warning( 'Could not find image processor class in the image processor config or the model config. Loading' ' based on pattern matching with the model\'s feature extractor configuration.' ) _a : Optional[int] = feature_extractor_class.replace('FeatureExtractor' ,'ImageProcessor' ) if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ): _a : List[Any] = config_dict['auto_map']['AutoFeatureExtractor'] _a : List[str] = feature_extractor_auto_map.replace('FeatureExtractor' ,'ImageProcessor' ) logger.warning( 'Could not find image processor auto map in the image processor config or the model config.' ' Loading based on pattern matching with the model\'s feature extractor configuration.' ) # If we don't find the image processor class in the image processor config, let's try the model config. 
if image_processor_class is None and image_processor_auto_map is None: if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): _a : Dict = AutoConfig.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase ) # It could be in `config.image_processor_type`` _a : Optional[int] = getattr(_UpperCAmelCase ,'image_processor_type' ,_UpperCAmelCase ) if hasattr(_UpperCAmelCase ,'auto_map' ) and "AutoImageProcessor" in config.auto_map: _a : Union[str, Any] = config.auto_map['AutoImageProcessor'] if image_processor_class is not None: _a : Optional[int] = image_processor_class_from_name(_UpperCAmelCase ) _a : List[str] = image_processor_auto_map is not None _a : Optional[int] = image_processor_class is not None or type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING _a : Optional[int] = resolve_trust_remote_code( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) if has_remote_code and trust_remote_code: _a : Dict = get_class_from_dynamic_module( _UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase ) _a : int = kwargs.pop('code_revision' ,_UpperCAmelCase ) if os.path.isdir(_UpperCAmelCase ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase ) elif image_processor_class is not None: return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING: _a : Dict = IMAGE_PROCESSOR_MAPPING[type(_UpperCAmelCase )] return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase ) raise ValueError( F"""Unrecognized image processor in {pretrained_model_name_or_path}. 
Should have a """ F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """ F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" ) @staticmethod def __lowercase ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Dict ): IMAGE_PROCESSOR_MAPPING.register(_UpperCAmelCase ,_UpperCAmelCase )
89
1
"""Entry point for the `accelerate config` command."""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'


def get_user_input():
    """Interactively prompt the user and return the resulting config object.

    Dispatches to the SageMaker questionnaire when the user selects AWS,
    otherwise to the local-cluster questionnaire.
    """
    compute_environment = _ask_options(
        'In which compute environment are you running?',
        ['This machine', 'AWS (Amazon SageMaker)'],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    """Build the argument parser for `accelerate config`.

    When `subparsers` is given (invoked as a sub-command of the main
    `accelerate` CLI), register under it; otherwise create a standalone parser.
    """
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)

    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ),
    )

    if subparsers is not None:
        # As a sub-command, dispatch to config_command when selected.
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    """Run the questionnaire and save the resulting config to disk.

    Saves to `--config_file` when given, otherwise to the default YAML path
    inside the cache directory (created if missing). JSON vs YAML is chosen
    from the file extension.
    """
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"""accelerate configuration saved at {config_file}""")


def main():
    """Standalone entry point: parse args and run the config command."""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
89
"""The `Image` feature of the `datasets` library: encode/decode images to/from Arrow storage."""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType


# Lazily populated by list_image_compression_formats().
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype('|b1'),
    np.dtype('|u1'),
    np.dtype('<u2'),
    np.dtype('>u2'),
    np.dtype('<i2'),
    np.dtype('>i2'),
    np.dtype('<u4'),
    np.dtype('>u4'),
    np.dtype('<i4'),
    np.dtype('>i4'),
    np.dtype('<f4'),
    np.dtype('>f4'),
    np.dtype('<f8'),
    np.dtype('>f8'),
]


@dataclass
class Image:
    """Image feature: stores images as a struct of `bytes` and `path` in Arrow.

    Attributes:
        decode: whether `decode_example` returns a `PIL.Image.Image` (True) or
            the raw `{"bytes", "path"}` dict (False).
        id: optional feature identifier.
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Image', init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode one example into the Arrow-storable `{"bytes", "path"}` dict."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.')
        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get('path') is not None and os.path.isfile(value['path']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('path')}
        elif value.get('bytes') is not None or value.get('path') is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('bytes'), "path": value.get('path')}
        else:
            raise ValueError(
                f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."""
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode a `{"bytes", "path"}` dict back into a `PIL.Image.Image`.

        `token_per_repo_id` maps Hub repo_id -> auth token for streaming
        private-repo images through `xopen`.
        """
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.')

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.')

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split('::')[-1]
                    try:
                        # Look up the auth token for the repo this URL belongs to.
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)['repo_id']
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, 'rb', use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in decodable state return the feature itself, otherwise flatten to bytes/path columns."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value('binary'),
                "path": Value('string'),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array of any supported layout to the `{"bytes", "path"}` struct type."""
        if pa.types.is_string(storage.type):
            # Array of paths only.
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            # Array of raw bytes only.
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            # Struct that may be missing either field.
            if storage.type.get_field_index('bytes') >= 0:
                bytes_array = storage.field('bytes')
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index('path') >= 0:
                path_array = storage.field('path')
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # Nested lists are interpreted as image arrays and re-encoded to bytes.
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))['bytes'] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the table: read each `path` into `bytes`, keeping only basenames."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, 'rb') as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('path').to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)


def list_image_compression_formats() -> List[str]:
    """Return (and cache) the PIL formats that support both reading and writing."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Serialize a PIL image, keeping its native format when writable, else PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    """Encode a PIL image to `{"bytes", "path"}`, preferring the on-disk path when known."""
    if hasattr(image, 'filename') and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    """Encode a numpy array as image bytes, downcasting to a Pillow-compatible dtype if needed."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1')
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."""
            )
        if dtype is not dest_dtype:
            warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:
        # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"""
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """Encode a homogeneous list of image objects (paths, arrays or PIL images) to storage dicts."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')

    if objs:
        # Infer the element type from the first non-null value.
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
89
1
"""data2vec-text model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}


class Data2VecTextConfig(PretrainedConfig):
    """Configuration class for a data2vec-text model.

    All arguments mirror the BERT/RoBERTa family of configurations:
    vocabulary size, transformer dimensions, dropout rates, positional
    embedding settings and special token ids (passed to the base class).
    """

    model_type = 'data2vec-text'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are handled by PretrainedConfig.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    """ONNX export configuration for data2vec-text."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic axes of the exported model inputs; multiple-choice adds a `choice` axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
89
"""Quine–McCluskey method for minimizing boolean expressions."""
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Compare two equal-length binary strings.

    Returns the merged string with '_' at the single differing position,
    or False when the strings differ in more than one position.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '_'
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Iteratively reduce the list of binary terms, collecting prime implicants.

    Terms whose marker stays '$' in a round are appended to the result;
    the loop ends when a round produces no new work.
    """
    pi = []
    while True:
        check1 = ['$'] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # NOTE(review): pairs for which compare_string fails are the
                # ones marked here — preserved from the original logic.
                if k is False:
                    check1[i] = '*'
                    check1[j] = '*'
                    temp.append('X')
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to its fixed-width binary string of `no_of_variable` bits."""
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select essential prime implicants from the coverage chart.

    First takes implicants that are the sole cover of some minterm, then
    greedily picks the implicant covering the most remaining minterms.
    Mutates `chart` in place (columns are zeroed as they are covered).
    """
    temp = []
    select = [0] * len(chart)
    # Columns covered by exactly one implicant force that implicant in.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy cover for the remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('_')
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    """Prompt for variables and minterms, then print prime and essential prime implicants."""
    no_of_variable = int(input('Enter the no. of variables\n'))
    minterms = [
        float(x)
        for x in input(
            'Enter the decimal representation of Minterms \'Spaces Separated\'\n'
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print('Prime Implicants are:')
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print('Essential Prime Implicants are:')
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
89
1
"""XGBoost classifier demo on the IRIS dataset."""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    """Split a scikit-learn-style dataset dict into (features, targets)."""
    return (data["data"], data["target"])


def xgboost(features, target) -> XGBClassifier:
    """Fit and return an XGBoost classifier on the given features/targets."""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """Train on IRIS and display a normalized confusion matrix for the test split."""
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris['target_names']

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap='Blues',
        normalize='true',
    )
    plt.title('Normalized Confusion Matrix - IRIS Dataset')
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
89
"""Lazy import structure for the CPM-Ant model."""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Mapping of submodule name -> public names, consumed by _LazyModule below.
_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only exposed when torch is installed.
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
89
1
"""Project Euler 40: digits of Champernowne's constant.

Concatenate the positive integers 123456789101112... and return the product
d1 * d10 * d100 * d1000 * d10000 * d100000 * d1000000.
"""


def solution() -> int:
    """Return the product of the 1st, 10th, ..., 1000000th digits of the constant."""
    constant = []
    i = 1
    # Appending 1e6 number strings guarantees well over 1e6 digits total.
    while len(constant) < 1E6:
        constant.append(str(i))
        i += 1
    digits = ''.join(constant)
    return (
        int(digits[0])
        * int(digits[9])
        * int(digits[99])
        * int(digits[999])
        * int(digits[9999])
        * int(digits[99999])
        * int(digits[999999])
    )


if __name__ == "__main__":
    print(solution())
89
"""Tokenization tests for LayoutLM."""
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """WordPiece tokenization tests driven by TokenizerTesterMixin."""

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # Minimal WordPiece vocab exercising continuation ('##') tokens.
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Load a slow tokenizer from the fixture vocab written in setUp."""
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Raw/normalized text pair used by the common tokenizer tests."""
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        # Intentionally empty in the original test suite.
        pass
89
1
"""Project Euler 11: greatest product of four adjacent numbers in a 20x20 grid.

Reads the grid from `grid.txt` located next to this file and checks all
horizontal, vertical and both diagonal runs of four.
"""
import os


def solution() -> int:
    """Return the maximum product of four adjacent grid values in any direction."""
    with open(os.path.dirname(__file__) + '/grid.txt') as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    print(solution())
89
"""Conditional DETR model configuration."""
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/conditional-detr-resnet-50': (
        'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """Configuration for a Conditional DETR model.

    Covers the backbone choice (timm name or a nested backbone config),
    encoder/decoder transformer dimensions, Hungarian matcher costs and
    the individual loss coefficients.
    """

    model_type = 'conditional_detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # A nested backbone config and a timm backbone are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a plain dict into the matching config class.
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for Conditional DETR."""

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
89
1
"""ElGamal key-pair generation.

Reconstructed from an obfuscation-corrupted source: every function was named
``__lowerCamelCase`` (so later defs clobbered earlier ones), ``make_key_files``
declared two parameters with the same name (a SyntaxError), and the call sites
referenced ``primitive_root``/``generate_key``/``make_key_files``/``main``,
which no longer existed.  The definitions below carry the names the call
sites require.
"""
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

# NOTE(review): constant kept from the original module; it is not referenced
# inside this file — confirm no external caller relies on it before removing.
__lowerCAmelCase = 3


def primitive_root(p_val: int) -> int:
    """Return a (probable) primitive root modulo the prime ``p_val``.

    Candidates are drawn at random; a candidate whose square or whose
    ``p_val``-th power is 1 (mod ``p_val``) is rejected and a new one drawn.
    """
    print('Generating primitive root of p' )
    while True:
        g = random.randrange(3 , p_val )
        if pow(g , 2 , p_val ) == 1:
            continue
        if pow(g , p_val , p_val ) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal (public_key, private_key) pair of ``key_size`` bits.

    Returns:
        ``((key_size, e_1, e_2, p), (key_size, d))`` where ``p`` is a large
        prime, ``e_1`` a primitive root mod ``p``, ``d`` the secret exponent
        and ``e_2 = (e_1 ** d) ** -1 mod p``.
    """
    print('Generating prime p...' )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_a = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 , p )  # private_key -> have to be greater than 2 for safety.
    e_a2 = cryptomath.find_mod_inverse(pow(e_a , d , p ) , p )
    public_key = (key_size, e_a, e_a2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; abort if either exists."""
    if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
        print('\nWARNING:' )
        print(
            f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            'Use a different name or delete these files and re-run this program.' )
        sys.exit()
    public_key, private_key = generate_key(key_size )
    print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(f"""{name}_pubkey.txt""" , 'w' ) as fo:
        fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
    print(f"""Writing private key to file {name}_privkey.txt...""" )
    with open(f"""{name}_privkey.txt""" , 'w' ) as fo:
        fo.write(f"""{private_key[0]},{private_key[1]}""" )


def main() -> None:
    """Entry point: generate a 2048-bit ElGamal key pair on disk."""
    print('Making key files...' )
    make_key_files('elgamal' , 2048 )
    print('Key files generation successful' )


if __name__ == "__main__":
    main()
89
'''simple docstring''' import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __magic_name__ : def __init__( self : List[str] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=13 ,_UpperCAmelCase : Any=32 ,_UpperCAmelCase : Union[str, Any]=3 ,_UpperCAmelCase : Optional[int]=4 ,_UpperCAmelCase : Optional[Any]=[10, 20, 30, 40] ,_UpperCAmelCase : Tuple=[2, 2, 3, 2] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=37 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Optional[Any]=10 ,_UpperCAmelCase : Tuple=0.02 ,_UpperCAmelCase : Any=["stage2", "stage3", "stage4"] ,_UpperCAmelCase : Any=[2, 3, 4] ,_UpperCAmelCase : Tuple=None ,): _a : Optional[Any] = parent _a : List[Any] = batch_size _a : str = image_size _a : Union[str, Any] = num_channels _a : List[Any] = num_stages _a : Dict = hidden_sizes _a : int = depths _a : Tuple = is_training _a : List[str] = use_labels _a : Dict = intermediate_size _a : int = hidden_act _a : int = num_labels _a : Any = initializer_range _a : Tuple = out_features _a : int = out_indices _a : List[Any] = scope 
def __lowercase ( self : Dict ): _a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a : Union[str, Any] = None if self.use_labels: _a : Tuple = ids_tensor([self.batch_size] ,self.num_labels ) _a : str = self.get_config() return config, pixel_values, labels def __lowercase ( self : Any ): return ConvNextVaConfig( num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,) def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ): _a : Optional[Any] = ConvNextVaModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : Any = model(_UpperCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def __lowercase ( self : Tuple ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ): _a : List[Any] = ConvNextVaForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : List[str] = model(_UpperCAmelCase ,labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def __lowercase ( self : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ): _a : Optional[int] = ConvNextVaBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : Dict = model(_UpperCAmelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels 
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] ) # verify backbone works with out_features=None _a : Tuple = None _a : List[Any] = ConvNextVaBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : List[str] = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def __lowercase ( self : Optional[Any] ): _a : Any = self.prepare_config_and_inputs() _a , _a , _a : Union[str, Any] = config_and_inputs _a : Any = {'pixel_values': pixel_values} return config, inputs_dict def __lowercase ( self : str ): _a : Tuple = self.prepare_config_and_inputs() _a , _a , _a : Tuple = config_and_inputs _a : List[Any] = {'pixel_values': pixel_values, 'labels': labels} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): lowerCAmelCase : str = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) lowerCAmelCase : str = ( {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification} if is_torch_available() else {} ) lowerCAmelCase : int = False lowerCAmelCase : str = False lowerCAmelCase : Optional[Any] = False lowerCAmelCase : List[str] = False lowerCAmelCase : Optional[int] = False def __lowercase ( self : List[Any] ): _a : str = ConvNextVaModelTester(self ) _a : Tuple = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 ) def __lowercase ( self : Optional[Any] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() 
self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __lowercase ( self : str ): return @unittest.skip(reason='ConvNextV2 does not use inputs_embeds' ) def __lowercase ( self : List[Any] ): pass @unittest.skip(reason='ConvNextV2 does not support input and output embeddings' ) def __lowercase ( self : Optional[int] ): pass @unittest.skip(reason='ConvNextV2 does not use feedforward chunking' ) def __lowercase ( self : Any ): pass def __lowercase ( self : List[str] ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: _a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels() _a : Any = True if model_class.__name__ in [ *get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase ), ]: continue _a : Optional[Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.train() _a : str = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase ) _a : Optional[int] = model(**_UpperCAmelCase ).loss loss.backward() def __lowercase ( self : str ): if not self.model_tester.is_training: return for model_class in self.all_model_classes: _a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels() _a : Optional[int] = False _a : Tuple = True if ( model_class.__name__ in [*get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase )] or not model_class.supports_gradient_checkpointing ): continue _a : Tuple = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.gradient_checkpointing_enable() model.train() _a : Any = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase ) _a : List[Any] = model(**_UpperCAmelCase ).loss loss.backward() def __lowercase ( self : List[Any] ): _a , 
_a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : int = model_class(_UpperCAmelCase ) _a : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a : Dict = [*signature.parameters.keys()] _a : int = ['pixel_values'] self.assertListEqual(arg_names[:1] ,_UpperCAmelCase ) def __lowercase ( self : int ): _a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def __lowercase ( self : Any ): def check_hidden_states_output(_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Dict ): _a : Union[str, Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): _a : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) ) _a : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _a : str = self.model_tester.num_stages self.assertEqual(len(_UpperCAmelCase ) ,expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) _a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : int = True check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _a : Optional[Any] = True check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) def __lowercase ( self : List[Any] ): _a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def __lowercase ( self : int ): for model_name in 
CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a : Any = ConvNextVaModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def __lowerCamelCase ( ) -> List[Any]: _a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): @cached_property def __lowercase ( self : Optional[Any] ): return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None @slow def __lowercase ( self : Any ): _a : List[str] = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(_UpperCAmelCase ) _a : Optional[int] = self.default_image_processor _a : str = prepare_img() _a : str = preprocessor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): _a : Dict = model(**_UpperCAmelCase ) # verify the logits _a : Optional[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,_UpperCAmelCase ) _a : Optional[Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_UpperCAmelCase ,atol=1E-4 ) )
89
1
"""Process-aware wrapper around ``tqdm.auto.tqdm`` for Accelerate.

Reconstructed from an obfuscation-corrupted source: the original ``def``
header reused the same identifier for the positional parameter, ``*args``
and ``**kwargs`` (a SyntaxError), and the disable flag was computed with
``local_process_index == 0`` — which would have hidden the progress bar on
the *main* process and shown it on every worker, the opposite of the
documented ``main_process_only`` behaviour.
"""
from .imports import is_tqdm_available

if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Forward to ``tqdm``, disabling the bar on non-main processes by default.

    Args:
        main_process_only: when True (default), only local rank 0 displays
            the progress bar; all other ranks receive ``disable=True``.
        *args, **kwargs: forwarded verbatim to ``tqdm.auto.tqdm``.

    Raises:
        ImportError: if the ``tqdm`` package is not installed.
    """
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
    disable = False
    if main_process_only:
        # Only the local main process (rank 0) keeps the bar enabled.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
89
"""Lazy-import ``__init__`` for the LiLT model (transformers pattern).

Reconstructed from an obfuscation-corrupted source: the modeling symbol list
had lost its ``_import_structure["modeling_lilt"] = ...`` assignment (it was
rebinding a throwaway name instead), ``_LazyModule`` referenced the undefined
``_import_structure``, and the ``sys.modules[__name__]`` self-replacement —
the mechanism that makes the lazy loading work at all — had been dropped.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> list of public symbols exposed lazily from it.
_import_structure = {
    'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is optional: without it only the configuration is importable.
    pass
else:
    _import_structure['modeling_lilt'] = [
        'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LiltForQuestionAnswering',
        'LiltForSequenceClassification',
        'LiltForTokenClassification',
        'LiltModel',
        'LiltPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; runtime stays lazy.
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a _LazyModule so attribute access triggers
    # the actual submodule import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
89
1
"""Test whether three 3-D points are collinear via the cross product.

Reconstructed from an obfuscation-corrupted source: every ``def`` header
declared two or more parameters with the same name (a SyntaxError),
``create_vector`` subtracted a point from itself, the ``Vectorad`` type
alias used in annotations was undefined, and the internal call sites
referenced ``create_vector``/``get_ad_vectors_cross``/``is_zero_vector``,
which did not exist.  Helper names follow the call sites; the entry point
follows the upstream reference implementation (``are_collinear``).
"""
Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]


def create_vector(end_pointa: Pointad, end_pointa2: Pointad) -> Vectorad:
    """Return the vector from ``end_pointa`` to ``end_pointa2``."""
    x = end_pointa2[0] - end_pointa[0]
    y = end_pointa2[1] - end_pointa[1]
    z = end_pointa2[2] - end_pointa[2]
    return (x, y, z)


def get_ad_vectors_cross(ab: Vectorad, ac: Vectorad) -> Vectorad:
    """Return the cross product ``ab x ac`` (right-hand rule)."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vectorad, accuracy: int) -> bool:
    """Return True if every component rounds to 0 at ``accuracy`` decimals."""
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)


def are_collinear(a: Pointad, b: Pointad, c: Pointad, accuracy: int = 10) -> bool:
    """Return True if points ``a``, ``b``, ``c`` lie on one straight line.

    The points are collinear exactly when ``AB x AC`` is the zero vector;
    ``accuracy`` controls the rounding tolerance of that zero test.
    """
    ab = create_vector(a , b )
    ac = create_vector(a , c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
89
"""Find the first partition number whose running proportion of 'perfect'
partitions drops below a given threshold.

Reconstructed from an obfuscation-corrupted source: both functions were
named ``__lowerCamelCase`` while ``solution`` called the undefined
``check_partition_perfect`` and the ``__main__`` guard called the undefined
``solution``; the definitions below carry the names the call sites require.
"""
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if ``positive_integer`` is a 'perfect' partition value.

    A value m is perfect here when ``log2(sqrt(4m + 1)/2 + 1/2)`` is an
    integer, i.e. m is of the form ``2**k * (2**k - 1) / ...`` generated by
    k = 3, 7, 15, ... (candidates m = 2, 12, 56, ...).
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )


def solution(max_proportion: float = 1 / 12345 ) -> int:
    """Return the first partition candidate at which the proportion of
    perfect partitions falls strictly below ``max_proportion``.

    Iterates odd-ish integers k = 3, 4, 5, ...; whenever ``(k**2 - 1) / 4``
    is an integer it counts as a partition, and perfect ones are tallied.
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate )
        integer += 1


if __name__ == "__main__":
    print(f"""{solution() = }""")
89
1
'''simple docstring''' import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __magic_name__ ( _UpperCamelCase , unittest.TestCase ): lowerCAmelCase : Tuple = MobileBertTokenizer lowerCAmelCase : Any = MobileBertTokenizerFast lowerCAmelCase : Tuple = True lowerCAmelCase : Tuple = True lowerCAmelCase : List[str] = filter_non_english lowerCAmelCase : Tuple = 'google/mobilebert-uncased' def __lowercase ( self : List[str] ): super().setUp() _a : Any = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] _a : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) _a : Optional[int] = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def __lowercase ( self : Dict ,_UpperCAmelCase : List[Any] ): _a : Union[str, Any] = 'UNwant\u00E9d,running' _a : List[Any] = 'unwanted, running' return input_text, output_text def __lowercase ( self : Any ): _a : List[str] = self.tokenizer_class(self.vocab_file ) _a : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(_UpperCAmelCase ,['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) ,[9, 6, 7, 12, 10, 11] ) def __lowercase ( self : Dict ): if not self.test_rust_tokenizer: return _a : Dict = self.get_tokenizer() _a : Optional[Any] = self.get_rust_tokenizer() _a : 
List[Any] = 'UNwant\u00E9d,running' _a : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase ) _a : str = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) _a : Any = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) _a : List[Any] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) _a : List[str] = self.get_rust_tokenizer() _a : Tuple = tokenizer.encode(_UpperCAmelCase ) _a : List[Any] = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) # With lower casing _a : List[str] = self.get_tokenizer(do_lower_case=_UpperCAmelCase ) _a : Tuple = self.get_rust_tokenizer(do_lower_case=_UpperCAmelCase ) _a : int = 'UNwant\u00E9d,running' _a : Optional[Any] = tokenizer.tokenize(_UpperCAmelCase ) _a : Optional[int] = rust_tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) _a : int = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) _a : List[str] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) _a : str = self.get_rust_tokenizer() _a : Tuple = tokenizer.encode(_UpperCAmelCase ) _a : Tuple = rust_tokenizer.encode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) def __lowercase ( self : List[str] ): _a : Any = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) ,['ah', '\u535A', '\u63A8', 'zz'] ) def __lowercase ( self : Tuple ): _a : str = BasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? 
' ) ,['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] ) def __lowercase ( self : Optional[Any] ): _a : Optional[int] = BasicTokenizer(do_lower_case=_UpperCAmelCase ,strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['h\u00E9llo'] ) def __lowercase ( self : Any ): _a : str = BasicTokenizer(do_lower_case=_UpperCAmelCase ,strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] ) def __lowercase ( self : str ): _a : List[Any] = BasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] ) def __lowercase ( self : Union[str, Any] ): _a : List[str] = BasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def __lowercase ( self : Optional[Any] ): _a : List[str] = BasicTokenizer(do_lower_case=_UpperCAmelCase ,strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def __lowercase ( self : List[str] ): _a : Any = BasicTokenizer(do_lower_case=_UpperCAmelCase ,strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def __lowercase ( self : List[str] ): _a : List[str] = BasicTokenizer(do_lower_case=_UpperCAmelCase ,never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? 
[UNK]' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def __lowercase ( self : Optional[Any] ): _a : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] _a : Dict = {} for i, token in enumerate(_UpperCAmelCase ): _a : Tuple = i _a : Union[str, Any] = WordpieceTokenizer(vocab=_UpperCAmelCase ,unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) ,[] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) ,['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) ,['[UNK]', 'runn', '##ing'] ) def __lowercase ( self : Optional[Any] ): self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def __lowercase ( self : int ): self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def __lowercase ( self : str ): self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' 
) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def __lowercase ( self : int ): _a : List[str] = self.get_tokenizer() _a : int = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_UpperCAmelCase ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] ) self.assertListEqual( [rust_tokenizer.tokenize(_UpperCAmelCase ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] ) @slow def __lowercase ( self : Dict ): _a : List[Any] = self.tokenizer_class.from_pretrained('google/mobilebert-uncased' ) _a : Any = tokenizer.encode('sequence builders' ,add_special_tokens=_UpperCAmelCase ) _a : Tuple = tokenizer.encode('multi-sequence build' ,add_special_tokens=_UpperCAmelCase ) _a : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ) _a : Tuple = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ,_UpperCAmelCase ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def __lowercase ( self : Optional[Any] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _a : int = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase ) _a : Union[str, Any] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" _a : List[str] = tokenizer_r.encode_plus( _UpperCAmelCase ,return_attention_mask=_UpperCAmelCase ,return_token_type_ids=_UpperCAmelCase ,return_offsets_mapping=_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,) _a : Optional[int] = tokenizer_r.do_lower_case if hasattr(_UpperCAmelCase ,'do_lower_case' ) else False _a : Dict = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), 
((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] ,tokens['offset_mapping'] ) def __lowercase ( self : Optional[Any] ): _a : List[str] = ['的', '人', '有'] _a : str = ''.join(_UpperCAmelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): _a : Optional[int] = True _a : Dict = self.tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase ) _a : Optional[int] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase ) _a : Tuple = tokenizer_p.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) _a : int = tokenizer_r.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) _a : int = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase ) _a : Dict = tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) _a : Any = False _a : Tuple = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase ) _a : List[Any] = self.tokenizer_class.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase ) _a : Union[str, Any] = tokenizer_r.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) _a : Union[str, Any] = tokenizer_p.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) _a : str = tokenizer_r.convert_ids_to_tokens(_UpperCAmelCase ) _a : List[str] = 
tokenizer_p.convert_ids_to_tokens(_UpperCAmelCase ) # it is expected that only the first Chinese character is not preceded by "##". _a : int = [ F"""##{token}""" if idx != 0 else token for idx, token in enumerate(_UpperCAmelCase ) ] self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
89
'''simple docstring''' import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=1 ) -> Dict: if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Tuple: _a : Any = [] for old_item in old_list: _a : Union[str, Any] = old_item.replace('in_layers.0' , 'norm1' ) _a : Optional[int] = new_item.replace('in_layers.2' , 'conv1' ) _a : str = new_item.replace('out_layers.0' , 'norm2' ) _a : List[str] = new_item.replace('out_layers.3' , 'conv2' ) _a : str = new_item.replace('emb_layers.1' , 'time_emb_proj' ) _a : Tuple = new_item.replace('skip_connection' , 'conv_shortcut' ) _a : Any = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Any: _a : List[str] = [] for old_item in old_list: _a : List[Any] = old_item _a : Optional[int] = new_item.replace('norm.weight' , 'group_norm.weight' ) _a : Optional[Any] = new_item.replace('norm.bias' , 'group_norm.bias' ) _a : Any = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) _a : Optional[Any] = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) _a : Optional[int] = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Any: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _a : Optional[Any] = old_checkpoint[path] _a : Optional[Any] = old_tensor.shape[0] // 3 _a : Any = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _a : int = old_tensor.shape[0] // config['num_head_channels'] // 3 _a : str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _a , _a , _a : Tuple = old_tensor.split(channels // num_heads , dim=1 ) _a : Dict = query.reshape(lowerCAmelCase_ ) _a : str = key.reshape(lowerCAmelCase_ ) _a : Optional[int] = value.reshape(lowerCAmelCase_ ) for path in paths: _a : Dict = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _a : Any = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) _a : str = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) _a : Union[str, Any] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: _a : int = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _a : List[str] = old_checkpoint[path['old']][:, :, 0] else: _a : Dict = old_checkpoint[path['old']] def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _a : Optional[int] = {} _a : Dict = checkpoint['time_embed.0.weight'] _a : Tuple = checkpoint['time_embed.0.bias'] _a : Union[str, Any] = checkpoint['time_embed.2.weight'] _a : List[str] = checkpoint['time_embed.2.bias'] _a : List[str] = checkpoint['input_blocks.0.0.weight'] _a : Union[str, Any] = checkpoint['input_blocks.0.0.bias'] _a : Optional[int] = checkpoint['out.0.weight'] _a : int = checkpoint['out.0.bias'] _a : List[str] = checkpoint['out.2.weight'] _a : Optional[int] = checkpoint['out.2.bias'] # Retrieves the 
keys for the input blocks only _a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) _a : Dict = { layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } # Retrieves the keys for the middle blocks only _a : List[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) _a : Union[str, Any] = { layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } # Retrieves the keys for the output blocks only _a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) _a : str = { layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } for i in range(1 , lowerCAmelCase_ ): _a : List[Any] = (i - 1) // (config['num_res_blocks'] + 1) _a : Optional[int] = (i - 1) % (config['num_res_blocks'] + 1) _a : Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key] _a : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key] if f"""input_blocks.{i}.0.op.weight""" in checkpoint: _a : List[Any] = checkpoint[ f"""input_blocks.{i}.0.op.weight""" ] _a : Union[str, Any] = checkpoint[ f"""input_blocks.{i}.0.op.bias""" ] continue _a : Any = renew_resnet_paths(lowerCAmelCase_ ) _a : List[str] = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""} _a : Optional[Any] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path, resnet_op] , config=lowerCAmelCase_ ) if len(lowerCAmelCase_ ): _a : List[str] = renew_attention_paths(lowerCAmelCase_ ) _a : List[Any] = { 'old': f"""input_blocks.{i}.1""", 'new': 
f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""", } _a : Optional[Any] = { f"""input_blocks.{i}.1.qkv.bias""": { 'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", 'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", 'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""input_blocks.{i}.1.qkv.weight""": { 'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", 'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", 'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ , ) _a : str = middle_blocks[0] _a : Tuple = middle_blocks[1] _a : Any = middle_blocks[2] _a : List[Any] = renew_resnet_paths(lowerCAmelCase_ ) assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ ) _a : Any = renew_resnet_paths(lowerCAmelCase_ ) assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ ) _a : int = renew_attention_paths(lowerCAmelCase_ ) _a : int = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ ) for i in range(lowerCAmelCase_ ): _a : List[str] = i // (config['num_res_blocks'] + 1) _a : Any = i % (config['num_res_blocks'] + 1) _a : Union[str, Any] = [shave_segments(lowerCAmelCase_ , 2 ) for name in 
output_blocks[i]] _a : Optional[Any] = {} for layer in output_block_layers: _a , _a : str = layer.split('.' )[0], shave_segments(lowerCAmelCase_ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(lowerCAmelCase_ ) else: _a : str = [layer_name] if len(lowerCAmelCase_ ) > 1: _a : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key] _a : Optional[Any] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key] _a : Dict = renew_resnet_paths(lowerCAmelCase_ ) _a : str = renew_resnet_paths(lowerCAmelCase_ ) _a : Optional[int] = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""} assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , config=lowerCAmelCase_ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _a : List[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) _a : Tuple = checkpoint[ f"""output_blocks.{i}.{index}.conv.weight""" ] _a : List[str] = checkpoint[ f"""output_blocks.{i}.{index}.conv.bias""" ] # Clear attentions as they have been attributed above. 
if len(lowerCAmelCase_ ) == 2: _a : Union[str, Any] = [] if len(lowerCAmelCase_ ): _a : Tuple = renew_attention_paths(lowerCAmelCase_ ) _a : str = { 'old': f"""output_blocks.{i}.1""", 'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""", } _a : List[Any] = { f"""output_blocks.{i}.1.qkv.bias""": { 'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", 'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", 'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""output_blocks.{i}.1.qkv.weight""": { 'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", 'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", 'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=lowerCAmelCase_ , ) else: _a : List[Any] = renew_resnet_paths(lowerCAmelCase_ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _a : int = '.'.join(['output_blocks', str(lowerCAmelCase_ ), path['old']] ) _a : Union[str, Any] = '.'.join(['up_blocks', str(lowerCAmelCase_ ), 'resnets', str(lowerCAmelCase_ ), path['new']] ) _a : Union[str, Any] = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = 
torch.load(args.checkpoint_path) with open(args.config_file) as f: __lowerCAmelCase = json.loads(f.read()) __lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __lowerCAmelCase = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
89
1
"""Lazy-import module initializer for the OWL-ViT model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Submodule -> public-name mapping consumed by ``_LazyModule`` below.
# BUG FIX: the obfuscated original assigned every structure to the same name
# while the ``_LazyModule(...)`` call referenced ``_import_structure``, which
# was never defined -> NameError on import.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

# Vision components are only registered when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

# PyTorch modeling code is likewise optional.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports ...
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    # ... while at runtime the module is replaced with a lazy proxy.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
"""Helpers for decoding and streaming audio through the ``ffmpeg`` binary."""
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode an arbitrary audio byte payload to a mono float32 waveform.

    BUG FIX: the obfuscated original gave both parameters the same name
    (SyntaxError) and assigned every local to ``_a`` while the command list
    referenced undefined names (``ac``, ``ar``...); ``np.floataa`` is restored
    to ``np.float32``.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # downmix to mono
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Yield raw byte chunks captured from the default system microphone.

    The capture backend depends on the host OS (alsa / avfoundation / dshow).
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    # Chunk size in *bytes* for one chunk_length_s worth of samples.
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Stream microphone audio as numpy chunks with left/right strides.

    Chunks arriving more than ``10 * delta`` behind wall-clock time are
    dropped so the consumer does not fall progressively behind real time.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into overlapping windows of ``chunk_len`` bytes.

    ``stride=(left, right)`` bytes of each yielded chunk overlap the previous /
    next chunk. With ``stream=True``, incomplete accumulations are also yielded
    with ``"partial": True``.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0  # the very first chunk has no left context yet
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                # Keep the overlap region for the next window.
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Run ``ffmpeg_command`` and yield its stdout in ``buflen``-byte reads."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
89
1
"""Fast (Rust-backed) tokenizer for the HerBERT model."""
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """HerBERT fast tokenizer.

    BUG FIX: in the obfuscated original all four overridden methods were named
    ``__lowercase`` (only the last survived) and the class attributes all
    shadowed each other under ``lowerCAmelCase``; the canonical
    tokenizer-framework names are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add ``<s> ... </s>`` (and a second ``... </s>`` for pairs)."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: all 0 for the first sequence, all 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the underlying Rust tokenizer model files to disk."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
89
"""Breadth-first search: shortest path and shortest distance on an unweighted graph."""

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Return one shortest path from ``start`` to ``goal``, or ``[]`` if none.

    BUG FIX: both functions in the obfuscated original were named
    ``__lowerCamelCase`` (the second shadowed the first) with duplicate
    parameter names, while ``__main__`` called the undefined names
    ``bfs_shortest_path`` / ``bfs_shortest_path_distance``; the names used by
    the call sites are restored.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest ``start``->``target`` path.

    Returns 0 when ``start == target`` and -1 when either node is missing or
    unreachable.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
89
1
"""Configuration for the RoBERTa-PreLayerNorm model."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    """Model configuration.

    BUG FIX: in the obfuscated original both classes were named
    ``__magic_name__`` (the second shadowed the first), all 18 ``__init__``
    parameters shared the name ``_UpperCAmelCase`` (a SyntaxError), and every
    attribute was written to a local ``_a`` instead of ``self``; the canonical
    names are restored with the same default values.
    """

    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for ONNX export; multiple-choice adds a choice axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
89
"""Lazy-import module initializer for the Swin Transformer model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Submodule -> public-name mapping consumed by ``_LazyModule`` below.
# BUG FIX: the obfuscated original assigned everything to ``__lowerCAmelCase``
# while ``_LazyModule(...)`` referenced the undefined ``_import_structure``.
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

# PyTorch classes are optional.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

# TensorFlow classes are optional too.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports ...
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    # ... while at runtime the module is replaced with a lazy proxy.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
89
1
"""Project Euler 25: index of the first Fibonacci term with ``n`` digits."""


def fibonacci(n: int) -> int:
    """Return the ``n``-th Fibonacci number (0-indexed convention, seq 0,1,1,2,...).

    BUG FIX: all three functions in the obfuscated original were named
    ``__lowerCamelCase`` (shadowing each other) while the bodies and
    ``__main__`` called the undefined names ``fibonacci``,
    ``fibonacci_digits_index`` and ``solution``; the called names are
    restored. Behavior is otherwise unchanged (non-int or ``n == 1`` input
    still returns 0 — NOTE(review): ``fibonacci(1)`` arguably should be 1,
    but downstream results only depend on ``n >= 2``).
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the smallest Fibonacci index whose term has at least ``n`` digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain ``n`` digits."""
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
89
"""Tokenization tests for the (m)BARThez tokenizers."""
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """mBARThez tokenizer test-suite.

    BUG FIX: in the obfuscated original every method was named ``__lowercase``
    (so only the last one survived and unittest would discover none of them)
    and the class attributes all shadowed each other under ``lowerCAmelCase``;
    the canonical ``test_*`` / mixin attribute names are restored.
    """

    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Round-trip a token through ``_convert_token_to_id`` / ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
1
'''simple docstring''' import argparse import importlib from pathlib import Path # Test all the extensions added in the setup __lowerCAmelCase = [ '''kernels/rwkv/wkv_cuda.cu''', '''kernels/rwkv/wkv_op.cpp''', '''kernels/deformable_detr/ms_deform_attn.h''', '''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''', '''models/graphormer/algos_graphormer.pyx''', ] def __lowerCamelCase ( lowerCAmelCase_ ) -> Tuple: # Test all the extensions added in the setup for file in FILES_TO_FIND: if not (transformers_path / file).exists(): return False return True if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''') __lowerCAmelCase = parser.parse_args() if args.check_lib: __lowerCAmelCase = importlib.import_module('''transformers''') __lowerCAmelCase = Path(transformers_module.__file__).parent else: __lowerCAmelCase = Path.cwd() / '''build/lib/transformers''' if not test_custom_files_are_present(transformers_path): raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
89
'''simple docstring'''
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class __magic_name__ ( _UpperCamelCase ):
    """Tests that TRANSFORMERS_OFFLINE=1 lets cached models load without network.

    Each test builds a small Python program as three string segments (a `load`
    import prelude, a `run` body, and a `mock` that monkey-patches
    ``socket.socket`` to fail) and executes it in a subprocess, because
    TRANSFORMERS_OFFLINE must be set before `transformers` is imported.

    NOTE(review): an obfuscation pass collapsed every local binding to `_a` and
    every argument to `_UpperCAmelCase`; names such as `load`, `run`, `mock`,
    `mname`, `cmd`, `env` and `result` are referenced but never bound, so these
    methods raise NameError as written — restore the original bindings.
    """

    @require_torch
    def __lowercase ( self : Tuple ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        _a : Optional[int] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        _a : List[str] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        # mock segment: any socket use inside the child process raises RuntimeError
        _a : Tuple = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        _a : List[Any] = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(_UpperCAmelCase )
        BertModel.from_pretrained(_UpperCAmelCase )
        BertTokenizer.from_pretrained(_UpperCAmelCase )
        pipeline(task='fill-mask' ,model=_UpperCAmelCase )
        # baseline - just load from_pretrained with normal network
        _a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
        # should succeed
        _a : Tuple = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        _a : int = '1'
        _a : List[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )

    @require_torch
    def __lowercase ( self : Any ):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        _a : Dict = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        _a : Optional[int] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        # mock segment: simulate a flaky connection via socket.error
        _a : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        _a : int = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(_UpperCAmelCase )
        BertModel.from_pretrained(_UpperCAmelCase )
        BertTokenizer.from_pretrained(_UpperCAmelCase )
        pipeline(task='fill-mask' ,model=_UpperCAmelCase )
        # baseline - just load from_pretrained with normal network
        _a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
        # should succeed
        _a : str = self.get_env()
        _a : Optional[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )

    @require_torch
    def __lowercase ( self : List[str] ):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        _a : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        # sharded checkpoint variant of the offline test
        _a : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        _a : str = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        # baseline - just load from_pretrained with normal network
        _a : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
        # should succeed
        _a : Dict = self.get_env()
        _a : int = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )
        # next emulate no network
        _a : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        _a : int = '1'
        _a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )

    @require_torch
    def __lowercase ( self : int ):
        # offline mode + pipeline() with no explicit task must fail with a clear message
        _a : Optional[Any] = '\nfrom transformers import pipeline\n '
        _a : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        _a : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        _a : List[Any] = self.get_env()
        _a : Dict = '1'
        _a : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
        _a : str = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
        # returncode 1 expected: task inference requires network access
        self.assertEqual(result.returncode ,1 ,result.stderr )
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)

    @require_torch
    def __lowercase ( self : int ):
        # trust_remote_code models must also load from cache in offline mode
        _a : Optional[int] = '\nfrom transformers import AutoModel\n '
        _a : List[Any] = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
        # baseline - just load from_pretrained with normal network
        _a : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run] )]
        # should succeed
        _a : Tuple = self.get_env()
        _a : List[str] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        _a : Optional[Any] = '1'
        _a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
        self.assertEqual(result.returncode ,0 ,result.stderr )
        self.assertIn('success' ,result.stdout.decode() )
89
1
"""Fill-mask demo for CamemBERT.

Bug fix: the module-level call `fill_mask(masked_input, model, tokenizer, topk=3)`
referenced names (`fill_mask`, `tokenizer`, `model`, `masked_input`, `topk`, and
the function's own locals) that an obfuscation pass had renamed away, so the
script raised NameError. The original identifiers are restored, and the demo is
guarded by `__main__` so importing this module has no side effects.
"""
import torch

from transformers import CamembertForMaskedLM, CamembertTokenizer


def fill_mask(masked_input, model, tokenizer, topk=5):
    """Return the top-k completions for the single `<mask>` in `masked_input`.

    Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py

    Args:
        masked_input: input sentence containing exactly one `<mask>` token.
        model: a masked-LM model returning token logits as its first output.
        tokenizer: matching tokenizer providing `mask_token` / `mask_token_id`.
        topk: number of candidate fillings to return.

    Returns:
        List of `(filled_sentence, probability, predicted_token)` tuples,
        best candidates first.
    """
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    # logits is now 1-D, so softmax/topk over dim=0 operate on the vocabulary axis
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        # "\u2581" is the sentencepiece word-boundary marker
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


if __name__ == "__main__":
    tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
    model = CamembertForMaskedLM.from_pretrained("camembert-base")
    model.eval()
    masked_input = "Le camembert est <mask> :)"
    print(fill_mask(masked_input, model, tokenizer, topk=3))
89
"""Project Euler problem 12: first triangle number with over 500 divisors.

Bug fix: all three functions were obfuscated to the same name
`__lowerCamelCase`, while the code calls `triangle_number_generator`,
`count_divisors` and `solution` — every call raised NameError. The intended
names are restored.
"""


def triangle_number_generator():
    """Yield triangle numbers T(n) = n * (n + 1) / 2 for n = 1 .. 999999."""
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n):
    """Return the number of divisors of `n` via trial-division factorization.

    Uses the identity d(p1^a1 * ... * pk^ak) = (a1 + 1) * ... * (ak + 1).
    """
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        # The remaining n is a prime factor with multiplicity 1
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number having more than 500 divisors."""
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)


if __name__ == "__main__":
    print(solution())
89
1
'''simple docstring'''
# Lazy import structure for the RoCBert model package.
# NOTE(review): this file looks corrupted by an obfuscation pass:
#   * the import-structure dict is bound to `__lowerCAmelCase`, but `_LazyModule`
#     below is called with `_import_structure`, which is never defined;
#   * the tokenizers-available branch is an empty `pass` instead of extending
#     the import structure;
#   * under TYPE_CHECKING the tokenizers `else:` branch raises
#     OptionalDependencyNotAvailable unconditionally.
# Restore from the upstream transformers file before relying on this module.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Mapping of submodule name -> public names it exports (consumed by _LazyModule).
__lowerCAmelCase = {
    '''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
    '''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): presumably this should register fast-tokenizer exports — confirm upstream.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-backed modeling exports, only registered when torch is installed.
    __lowerCAmelCase = [
        '''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''RoCBertForCausalLM''',
        '''RoCBertForMaskedLM''',
        '''RoCBertForMultipleChoice''',
        '''RoCBertForPreTraining''',
        '''RoCBertForQuestionAnswering''',
        '''RoCBertForSequenceClassification''',
        '''RoCBertForTokenClassification''',
        '''RoCBertLayer''',
        '''RoCBertModel''',
        '''RoCBertPreTrainedModel''',
        '''load_tf_weights_in_roc_bert''',
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # NOTE(review): raising here on the success path is almost certainly corruption.
        raise OptionalDependencyNotAvailable()

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )
else:
    import sys

    # At runtime, replace this module with a lazy loader.
    # NOTE(review): `_import_structure` is unbound here (see header note) — NameError as written.
    __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
89
'''simple docstring'''
# SQL <-> datasets bridge: a reader that builds a Dataset from a SQL query/table
# and a writer that streams a Dataset into a SQL table in batches.
# NOTE(review): an obfuscation pass collapsed locals to `_a` and arguments to
# `_UpperCAmelCase`; many names referenced below (`dataset`, `num_proc`,
# `batch_size`, `written`, `args`, `offset`, `index`, `to_sql_kwargs`, `batch`,
# `df`, `num_rows`) are never bound under those names — restore from upstream.
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream

if TYPE_CHECKING:
    # NOTE(review): `sqlitea` is presumably a mangled `sqlite3` — confirm upstream.
    import sqlitea

    import sqlalchemy


class __magic_name__ ( _UpperCamelCase ):
    # Reads a SQL query or table into a Dataset via the packaged Sql builder.
    def __init__( self : Optional[int] ,_UpperCAmelCase : Union[str, "sqlalchemy.sql.Selectable"] ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[Features] = None ,_UpperCAmelCase : str = None ,_UpperCAmelCase : bool = False ,**_UpperCAmelCase : Dict ,):
        super().__init__(features=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase ,**_UpperCAmelCase )
        # Underlying dataset builder configured with the query and connection.
        _a : Tuple = Sql(
            cache_dir=_UpperCAmelCase ,features=_UpperCAmelCase ,sql=_UpperCAmelCase ,con=_UpperCAmelCase ,**_UpperCAmelCase ,)

    def __lowercase ( self : Dict ):
        # Materialize the builder and return the 'train' split as a Dataset.
        _a : Optional[Any] = None
        _a : Dict = None
        _a : Dict = None
        _a : Optional[int] = None
        self.builder.download_and_prepare(
            download_config=_UpperCAmelCase ,download_mode=_UpperCAmelCase ,verification_mode=_UpperCAmelCase ,base_path=_UpperCAmelCase ,)
        # Build dataset for splits
        _a : List[str] = self.builder.as_dataset(
            split='train' ,verification_mode=_UpperCAmelCase ,in_memory=self.keep_in_memory )
        return dataset


class __magic_name__ :
    # Writes a Dataset to a SQL table, optionally with a multiprocessing pool.
    def __init__( self : Optional[int] ,_UpperCAmelCase : Dataset ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : Dict ,):
        # num_proc <= 0 is rejected up front; None means single-process.
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
        _a : Dict = dataset
        _a : List[Any] = name
        _a : Tuple = con
        # Fall back to the library-wide default batch size when none is given.
        _a : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        _a : List[Any] = num_proc
        _a : Tuple = to_sql_kwargs

    def __lowercase ( self : List[Any] ):
        # Strip arguments that _write supplies itself, then delegate.
        _a : Tuple = self.to_sql_kwargs.pop('sql' ,_UpperCAmelCase )
        _a : str = self.to_sql_kwargs.pop('con' ,_UpperCAmelCase )
        _a : Optional[Any] = self.to_sql_kwargs.pop('index' ,_UpperCAmelCase )
        _a : Any = self._write(index=_UpperCAmelCase ,**self.to_sql_kwargs )
        return written

    def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Dict ):
        # Write one batch (args = (offset, index, to_sql_kwargs)); returns row count.
        _a , _a , _a : Any = args
        # After the first batch, append instead of replacing the table.
        _a : Tuple = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
        _a : Dict = query_table(
            table=self.dataset.data ,key=slice(_UpperCAmelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
        _a : Tuple = batch.to_pandas()
        _a : Dict = df.to_sql(self.name ,self.con ,index=_UpperCAmelCase ,**_UpperCAmelCase )
        return num_rows or len(_UpperCAmelCase )

    def __lowercase ( self : int ,_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : List[Any] ):
        # Drive the batched writes, sequentially or via a process pool.
        _a : Union[str, Any] = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
                written += self._batch_sql((offset, index, to_sql_kwargs) )
        else:
            _a , _a : List[Any] = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,_UpperCAmelCase ,_UpperCAmelCase )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
                    written += num_rows
        return written
89
1
'''simple docstring'''
# Fast (tokenizers-backed) tokenizer for RetriBERT; behaves like BertTokenizerFast.
# NOTE(review): obfuscation collapsed locals/arguments to `_a`/`_UpperCAmelCase`;
# names such as `normalizer_state`, `do_lower_case`, `strip_accents`,
# `tokenize_chinese_chars`, `normalizer_class`, `token_ids_a`, `output`,
# `cls`, `sep` and `files` are referenced but never bound under those names.
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


__lowerCAmelCase = logging.get_logger(__name__)

# Expected vocabulary / serialized-tokenizer file names.
__lowerCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

# Hub URLs for the pretrained checkpoint's files.
__lowerCAmelCase = {
    '''vocab_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
        ),
    },
}

# Maximum input length the checkpoint supports.
__lowerCAmelCase = {
    '''yjernite/retribert-base-uncased''': 512,
}

# Per-checkpoint tokenizer constructor defaults.
__lowerCAmelCase = {
    '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}


class __magic_name__ ( _UpperCamelCase ):
    """Fast RetriBERT tokenizer (WordPiece), mirroring BERT's special-token layout."""

    lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
    lowerCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase : Any = PRETRAINED_INIT_CONFIGURATION
    lowerCAmelCase : List[str] = RetriBertTokenizer
    lowerCAmelCase : Tuple = ['input_ids', 'attention_mask']

    def __init__( self : str ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]="[UNK]" ,_UpperCAmelCase : List[Any]="[SEP]" ,_UpperCAmelCase : str="[PAD]" ,_UpperCAmelCase : int="[CLS]" ,_UpperCAmelCase : str="[MASK]" ,_UpperCAmelCase : int=True ,_UpperCAmelCase : Tuple=None ,**_UpperCAmelCase : str ,):
        super().__init__(
            _UpperCAmelCase ,tokenizer_file=_UpperCAmelCase ,do_lower_case=_UpperCAmelCase ,unk_token=_UpperCAmelCase ,sep_token=_UpperCAmelCase ,pad_token=_UpperCAmelCase ,cls_token=_UpperCAmelCase ,mask_token=_UpperCAmelCase ,tokenize_chinese_chars=_UpperCAmelCase ,strip_accents=_UpperCAmelCase ,**_UpperCAmelCase ,)
        # Rebuild the backend normalizer if its saved options differ from the
        # options requested by the caller.
        _a : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' ,_UpperCAmelCase ) != do_lower_case
            or normalizer_state.get('strip_accents' ,_UpperCAmelCase ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' ,_UpperCAmelCase ) != tokenize_chinese_chars
        ):
            _a : List[Any] = getattr(_UpperCAmelCase ,normalizer_state.pop('type' ) )
            _a : Union[str, Any] = do_lower_case
            _a : Optional[int] = strip_accents
            _a : Dict = tokenize_chinese_chars
            _a : Dict = normalizer_class(**_UpperCAmelCase )
        _a : Dict = do_lower_case

    def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Union[str, Any]=None ):
        # Build [CLS] A [SEP] (+ B [SEP]) model inputs with special tokens.
        _a : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def __lowercase ( self : List[str] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ):
        # Token-type ids: 0 for the first sequence (+specials), 1 for the second.
        _a : Optional[int] = [self.sep_token_id]
        _a : Optional[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def __lowercase ( self : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ):
        # Persist the vocabulary files and return their paths.
        _a : Tuple = self._tokenizer.model.save(_UpperCAmelCase ,name=_UpperCAmelCase )
        return tuple(_UpperCAmelCase )
89
"""Demonstrate affine rotations of a grayscale image with OpenCV.

Bug fixes:
  * `import cva` was a mangled `import cv2` — imported as `cv2 as cva` so the
    existing `cva.*` call sites keep working;
  * `get_rotation` was called but defined under an obfuscated name (NameError);
  * `np.floataa` was a mangled `np.float32` (AttributeError);
  * the demo body is now guarded by `__main__` so importing has no side effects.
"""
from pathlib import Path

import cv2 as cva
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` by the affine transform that maps the 3 points `pts1` to `pts2`.

    Args:
        img: source image (grayscale or color).
        pts1: 3x2 float32 array of source points.
        pts2: 3x2 float32 array of destination points.
        rows, cols: size of the output image.

    Returns:
        The warped image as a numpy array of shape (cols, rows) per cv2 convention.
    """
    matrix = cva.getAffineTransform(pts1, pts2)
    return cva.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
89
1
"""Binary shift operations on the string representation of integers.

Bug fix: all three functions were obfuscated to the single name
`__lowerCamelCase`, so the first two definitions were shadowed and unreachable,
and their bodies referenced locals (`binary_number`, `shift_amount`, ...) that
were never bound. Distinct names and the original locals are restored.
"""


def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift `number` left by `shift_amount` (appends zero bits).

    >>> logical_left_shift(0, 1)
    '0b00'
    >>> logical_left_shift(1, 1)
    '0b10'

    Raises:
        ValueError: if either input is negative.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount`, filling with zeros.

    >>> logical_right_shift(1024, 10)
    '0b1'

    Raises:
        ValueError: if either input is negative.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    # Shifting out every bit leaves zero.
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount`, replicating the sign bit.

    Negative numbers are rendered in two's complement before shifting.

    >>> arithmetic_right_shift(0, 1)
    '0b00'
    >>> arithmetic_right_shift(-1, 1)
    '0b11'
    >>> arithmetic_right_shift(17, 2)
    '0b000100'
    """
    if number >= 0:
        # Get binary representation of positive number, with an explicit 0 sign bit
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:
        # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])
        # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
89
'''simple docstring'''
# Lazy import structure for the BigBird-Pegasus model package.
# NOTE(review): the import-structure dict is bound to `__lowerCAmelCase` (an
# obfuscated name), but `_LazyModule` below receives `_import_structure`, which
# is never defined — NameError at import time as written.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Mapping of submodule name -> public names it exports (consumed by _LazyModule).
__lowerCAmelCase = {
    '''configuration_bigbird_pegasus''': [
        '''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''BigBirdPegasusConfig''',
        '''BigBirdPegasusOnnxConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-backed modeling exports, only registered when torch is installed.
    __lowerCAmelCase = [
        '''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BigBirdPegasusForCausalLM''',
        '''BigBirdPegasusForConditionalGeneration''',
        '''BigBirdPegasusForQuestionAnswering''',
        '''BigBirdPegasusForSequenceClassification''',
        '''BigBirdPegasusModel''',
        '''BigBirdPegasusPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy loader.
    __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
89
1
"""Compute Proth numbers (numbers of the form k * 2^n + 1, k odd, k < 2^n).

Bug fix: the function was obfuscated to `__lowerCamelCase` while the
`__main__` block calls `proth(number)` and prints `value` — both unbound
(NameError). The intended names are restored.
"""
import math


def proth(number: int) -> int:
    """Return the `number`-th Proth number (1-indexed: 3, 5, 9, 13, 17, 25, ...).

    >>> proth(1)
    3
    >>> proth(6)
    25

    Raises:
        TypeError: if `number` is not an int.
        ValueError: if `number` < 1.
    """
    if not isinstance(number, int):
        temp = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(temp)

    if number < 1:
        temp = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(temp)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Enough doubling "blocks" to cover the requested index: the list
        # gains 3 * 2^(block-1) entries per block.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        # Number of new Proth entries contributed by the next block.
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"""ValueError: there is no {number}th Proth number""")
            continue

        print(f"""The {number}th Proth number: {value}""")
89
'''simple docstring'''
# Precompute and pickle per-example max sequence lengths for a seq2seq dataset
# (used to enable length-based batching / sortish sampling).
# NOTE(review): obfuscation collapsed locals to `_a` and params to
# `lowerCAmelCase_`; names referenced below (`tok`, `ds`, `pad`, `dl`,
# `max_lens`, `src_lens`, `consider_target`, `train_ds`, `val_ds`, `train_lens`,
# `val_lens`, `save_len_file`) are never bound under those names as written.
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save


def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1024 , lowerCAmelCase_=1024 , lowerCAmelCase_=False , **lowerCAmelCase_ ) -> List[Any]:
    # Load the tokenizer and the train split of the dataset.
    _a : str = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
    _a : List[Any] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='train' , **lowerCAmelCase_ )
    _a : List[str] = tok.pad_token_id

    def get_lens(lowerCAmelCase_ ):
        # Batch through the dataset and count non-padding tokens per example.
        _a : Dict = tqdm(
            DataLoader(lowerCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=lowerCAmelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        _a : Union[str, Any] = []
        for batch in dl:
            _a : Optional[Any] = batch['input_ids'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
            _a : Optional[Any] = batch['labels'].ne(lowerCAmelCase_ ).sum(1 ).tolist()
            if consider_target:
                # Record max(source, target) length per example.
                for src, tgt in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
                    max_lens.append(max(lowerCAmelCase_ , lowerCAmelCase_ ) )
            else:
                max_lens.extend(lowerCAmelCase_ )
        return max_lens

    # Compute lengths for train and validation splits and pickle them.
    _a : str = get_lens(lowerCAmelCase_ )
    _a : Optional[int] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='val' , **lowerCAmelCase_ )
    _a : Dict = get_lens(lowerCAmelCase_ )
    pickle_save(lowerCAmelCase_ , train_ds.len_file )
    pickle_save(lowerCAmelCase_ , val_ds.len_file )


if __name__ == "__main__":
    fire.Fire(save_len_file)
89
1
'''simple docstring'''
# Regression-check script: for each diffusers UNet checkpoint mirrored locally,
# run one deterministic forward pass and compare the first 30 output values
# against hard-coded reference tensors.
# NOTE(review): obfuscation collapsed every assignment to `__lowerCAmelCase`,
# so `results` stays an empty dict (the per-model keys were lost) and
# `models`, `local_checkpoint`, `model`, `noise`, `time_step` and `logits`
# are referenced but never bound — restore the original variable names.
import random

import torch
from huggingface_hub import HfApi

from diffusers import UNetaDModel


__lowerCAmelCase = HfApi()

# Intended to map "<org>_<model-name>" -> expected output slice (never filled in here).
__lowerCAmelCase = {}
# fmt: off
# Reference output slices, one per checkpoint, in the original script's order.
__lowerCAmelCase = torch.tensor([
    -0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
    1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
    -1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
    0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
__lowerCAmelCase = torch.tensor([
    -2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
    1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
    -2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
    2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
__lowerCAmelCase = torch.tensor([
    -0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
    -0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
    -0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
    0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
__lowerCAmelCase = torch.tensor([
    0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
    -0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
    0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
    -0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
__lowerCAmelCase = torch.tensor([
    0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
    -0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
    0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
    -0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
__lowerCAmelCase = torch.tensor([
    0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
    -0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
    0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
    -0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
__lowerCAmelCase = torch.tensor([
    0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
    -0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
    0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
    -0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
__lowerCAmelCase = torch.tensor([
    0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
    -0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
    0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
    -0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
__lowerCAmelCase = torch.tensor([
    -1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
    1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
    -2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
    1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
__lowerCAmelCase = torch.tensor([
    -1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
    0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
    -2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
    1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
__lowerCAmelCase = torch.tensor([
    -1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
    0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
    -2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
    1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
__lowerCAmelCase = torch.tensor([
    -2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
    1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
    -3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
    3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
__lowerCAmelCase = torch.tensor([
    -2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
    1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
    -2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
    2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
__lowerCAmelCase = torch.tensor([
    -2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
    1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
    -3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
    3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
__lowerCAmelCase = torch.tensor([
    -1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
    1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
    -2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
    1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on

# Iterate over all diffusers models on the Hub; only Google/CompVis checkpoints
# mirrored under /home/patrick/google_checkpoints are actually checked.
__lowerCAmelCase = api.list_models(filter='''diffusers''')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        __lowerCAmelCase = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]

        print(f"""Started running {mod.modelId}!!!""")

        if mod.modelId.startswith('''CompVis'''):
            __lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
        else:
            __lowerCAmelCase = UNetaDModel.from_pretrained(local_checkpoint)

        # Fixed seeds so the forward pass is reproducible across runs.
        torch.manual_seed(0)
        random.seed(0)

        __lowerCAmelCase = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        __lowerCAmelCase = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            __lowerCAmelCase = model(noise, time_step).sample

        # Compare the first 30 values against the stored reference slice.
        assert torch.allclose(
            logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1e-3
        )
        print(f"""{mod.modelId} has passed successfully!!!""")
89
'''simple docstring'''
# Singly linked list with index access, insertion/deletion at arbitrary
# positions, in-place reversal, and self-test/demo functions.
# NOTE(review): obfuscation collapsed assignment targets to `_a` and parameters
# to `_UpperCAmelCase`/`lowerCAmelCase_`; names referenced below (`data`,
# `node`, `index`, `current`, `new_node`, `temp`, `delete_node`, `prev`,
# `next_node`, `linked_list`, `LinkedList`, `Node`, and the method names
# `insert_nth`, `insert_head`, `insert_tail`, `delete_head`, `delete_nth`,
# `delete_tail`, `is_empty`, `reverse`, `print_list`, `main`) are never bound
# under those names — restore the original identifiers before use.
from typing import Any


class __magic_name__ :
    # A single list node holding `data` and a `next` pointer.
    def __init__( self : List[Any] ,_UpperCAmelCase : Any ):
        _a : List[Any] = data
        _a : Union[str, Any] = None

    def __repr__( self : Any ):
        return F"""Node({self.data})"""


class __magic_name__ :
    # Singly linked list; only the head pointer is stored.
    def __init__( self : int ):
        _a : Tuple = None

    def __iter__( self : str ):
        # Yield each node's data from head to tail.
        _a : int = self.head
        while node:
            yield node.data
            _a : Union[str, Any] = node.next

    def __len__( self : Optional[Any] ):
        # O(n): counts by traversing the whole list.
        return sum(1 for _ in self )

    def __repr__( self : str ):
        return "->".join([str(_UpperCAmelCase ) for item in self] )

    def __getitem__( self : Tuple ,_UpperCAmelCase : int ):
        # Linear-time positional read.
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None

    def __setitem__( self : Union[str, Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
        # Linear-time positional write.
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        _a : Any = self.head
        for _ in range(_UpperCAmelCase ):
            _a : Optional[Any] = current.next
        _a : Optional[int] = data

    def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Any ):
        # Append at the tail.
        self.insert_nth(len(self ) ,_UpperCAmelCase )

    def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : Any ):
        # Prepend at the head.
        self.insert_nth(0 ,_UpperCAmelCase )

    def __lowercase ( self : str ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
        # Insert at an arbitrary position (index == len appends).
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        _a : int = Node(_UpperCAmelCase )
        if self.head is None:
            _a : str = new_node
        elif index == 0:
            _a : List[str] = self.head  # link new_node to head
            _a : Union[str, Any] = new_node
        else:
            _a : int = self.head
            for _ in range(index - 1 ):
                _a : Union[str, Any] = temp.next
            _a : List[str] = temp.next
            _a : Optional[int] = new_node

    def __lowercase ( self : Optional[int] ):  # print every node data
        print(self )

    def __lowercase ( self : str ):
        # Remove and return the head's data.
        return self.delete_nth(0 )

    def __lowercase ( self : str ):  # delete from tail
        return self.delete_nth(len(self ) - 1 )

    def __lowercase ( self : List[str] ,_UpperCAmelCase : int = 0 ):
        # Remove the node at `index` and return its data.
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('List index out of range.' )
        _a : Optional[Any] = self.head  # default first node
        if index == 0:
            _a : int = self.head.next
        else:
            _a : int = self.head
            for _ in range(index - 1 ):
                _a : str = temp.next
            _a : str = temp.next
            _a : int = temp.next.next
        return delete_node.data

    def __lowercase ( self : List[Any] ):
        # True when the list has no nodes.
        return self.head is None

    def __lowercase ( self : Tuple ):
        # In-place reversal by walking the list and flipping `next` pointers.
        _a : List[Any] = None
        _a : Tuple = self.head
        while current:
            # Store the current node's next node.
            _a : Dict = current.next
            # Make the current node's next point backwards
            _a : str = prev
            # Make the previous node be the current node
            _a : Tuple = current
            # Make the current node the next node (to progress iteration)
            _a : Optional[Any] = next_node
        # Return prev in order to put the head at the end
        _a : int = prev


def __lowerCamelCase ( ) -> None:
    # Assertion-based self-test with integer payloads.
    _a : List[str] = LinkedList()
    assert linked_list.is_empty() is True
    assert str(lowerCAmelCase_ ) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10 ):
        assert len(lowerCAmelCase_ ) == i
        linked_list.insert_nth(lowerCAmelCase_ , i + 1 )
    assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 11 ) )

    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(0 , 12 ) )

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(lowerCAmelCase_ ) == 9
    assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 10 ) )

    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True

    for i in range(0 , 9 ):
        _a : Union[str, Any] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True

    linked_list.reverse()
    assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(-8 , 1 ) )


def __lowerCamelCase ( ) -> None:
    # Extended self-test with heterogeneous payloads (strings, floats, Nodes, None).
    _a : Dict = [
        -9,
        100,
        Node(77345112 ),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55_555,
        'Hello, world!',
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    _a : List[Any] = LinkedList()

    for i in test_input:
        linked_list.insert_tail(lowerCAmelCase_ )

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(lowerCAmelCase_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    _a : List[str] = linked_list.delete_head()
    assert result == -9
    assert (
        str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    _a : Dict = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    _a : Optional[Any] = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(lowerCAmelCase_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!' ) )
    assert (
        str(lowerCAmelCase_ ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(lowerCAmelCase_ )
    assert (
        str(lowerCAmelCase_ ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(lowerCAmelCase_ ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def __lowerCamelCase ( ) -> Union[str, Any]:
    # Interactive demo driving the list from user input.
    from doctest import testmod

    testmod()

    _a : Optional[int] = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
    linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
    linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nDelete head' )
    linked_list.delete_head()
    print('Delete tail' )
    linked_list.delete_tail()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nReverse linked list' )
    linked_list.reverse()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nString representation of linked list:' )
    print(lowerCAmelCase_ )
    print('\nReading/changing Node data using indexing:' )
    print(f"""Element at Position 1: {linked_list[1]}""" )
    _a : Optional[Any] = input('Enter New Value: ' ).strip()
    print('New list:' )
    print(lowerCAmelCase_ )
    print(f"""length of linked_list is : {len(lowerCAmelCase_ )}""" )


if __name__ == "__main__":
    main()
89
1
"""Learning-rate schedulers built on `torch.optim.lr_scheduler.LambdaLR`.

Each factory returns a `LambdaLR` whose multiplier is applied to the
optimizer's initial learning rate. `get_scheduler` is the unified entry point
that dispatches on a `SchedulerType` name.
"""
import math
from enum import Enum
from typing import Optional, Union

from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR

from .utils import logging


logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    """Names of the supported schedules, accepted by `get_scheduler`."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Schedule with a constant learning rate (multiplier is always 1)."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int) -> float:
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise-constant multiplier described by `step_rules`.

    `step_rules` is e.g. ``"1:10,0.1:20,0.01:55,0.005"``: multiplier 1 until
    step 10, 0.1 until step 20, 0.01 until step 55, then 0.005 for the rest
    of training. The trailing rule (no ``:``) is the final multiplier.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        # Closure factory so the returned function captures its own rule table.
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            # Past the last threshold: fall back to the trailing multiplier.
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay; `num_cycles` waves (default half-cosine to 0)."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup, then a cosine decay with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # Modulo restarts the cosine at the top of each cycle.
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup, then polynomial decay from the optimizer's initial lr down to `lr_end`.

    Raises:
        ValueError: if `lr_end` is not strictly smaller than the initial lr.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Build the scheduler named by `name`, validating the arguments it requires.

    Raises:
        ValueError: when a schedule needs `num_warmup_steps` or
            `num_training_steps` and the caller did not provide it.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
89
"""Run the doctests embedded in the transformers source tree and docs."""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run doctests on every file of `directory` matching the filters.

        Args:
            directory: directory to crawl (non-recursive).
            identifier: keep only files whose name contains this substring.
            n_identifier: drop files containing this substring (or any of them).
            ignore_files: additional file names to skip (``__init__.py`` always is).
            only_modules: when True, resolve each file as a `transformers`
                attribute and run its `DocTestSuite`; otherwise run
                `doctest.testfile` on the file itself.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    # Not every source file maps to a public module attribute.
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_files(self):
        """Doctests for all modeling files (CTRL excluded)."""
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_files(self):
        """Doctests for all tokenization files."""
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_files(self):
        """Doctests for all configuration files."""
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_files(self):
        """Doctests for every source file NOT covered by the tests above."""
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_documentation(self):
        """Doctests for the documentation sources (run as plain files, not modules)."""
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
89
1
'''simple docstring''' # Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase = { '''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''], '''tokenization_cpmant''': ['''CpmAntTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CpmAntForCausalLM''', '''CpmAntModel''', '''CpmAntPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
89
"""AutoImageProcessor: resolve and instantiate the right image processor for a model."""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union

# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
    CONFIG_MAPPING_NAMES,
    AutoConfig,
    model_type_to_module_name,
    replace_list_option_in_docstrings,
)


logger = logging.get_logger(__name__)

# model_type -> image processor class name.
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
    [
        ("align", "EfficientNetImageProcessor"),
        ("beit", "BeitImageProcessor"),
        ("bit", "BitImageProcessor"),
        ("blip", "BlipImageProcessor"),
        ("blip-2", "BlipImageProcessor"),
        ("bridgetower", "BridgeTowerImageProcessor"),
        ("chinese_clip", "ChineseCLIPImageProcessor"),
        ("clip", "CLIPImageProcessor"),
        ("clipseg", "ViTImageProcessor"),
        ("conditional_detr", "ConditionalDetrImageProcessor"),
        ("convnext", "ConvNextImageProcessor"),
        ("convnextv2", "ConvNextImageProcessor"),
        ("cvt", "ConvNextImageProcessor"),
        ("data2vec-vision", "BeitImageProcessor"),
        ("deformable_detr", "DeformableDetrImageProcessor"),
        ("deit", "DeiTImageProcessor"),
        ("deta", "DetaImageProcessor"),
        ("detr", "DetrImageProcessor"),
        ("dinat", "ViTImageProcessor"),
        ("donut-swin", "DonutImageProcessor"),
        ("dpt", "DPTImageProcessor"),
        ("efficientformer", "EfficientFormerImageProcessor"),
        ("efficientnet", "EfficientNetImageProcessor"),
        ("flava", "FlavaImageProcessor"),
        ("focalnet", "BitImageProcessor"),
        ("git", "CLIPImageProcessor"),
        ("glpn", "GLPNImageProcessor"),
        ("groupvit", "CLIPImageProcessor"),
        ("imagegpt", "ImageGPTImageProcessor"),
        ("instructblip", "BlipImageProcessor"),
        ("layoutlmv2", "LayoutLMv2ImageProcessor"),
        ("layoutlmv3", "LayoutLMv3ImageProcessor"),
        ("levit", "LevitImageProcessor"),
        ("mask2former", "Mask2FormerImageProcessor"),
        ("maskformer", "MaskFormerImageProcessor"),
        ("mgp-str", "ViTImageProcessor"),
        ("mobilenet_v1", "MobileNetV1ImageProcessor"),
        ("mobilenet_v2", "MobileNetV2ImageProcessor"),
        ("mobilevit", "MobileViTImageProcessor"),
        ("mobilevitv2", "MobileViTImageProcessor"),
        ("nat", "ViTImageProcessor"),
        ("oneformer", "OneFormerImageProcessor"),
        ("owlvit", "OwlViTImageProcessor"),
        ("perceiver", "PerceiverImageProcessor"),
        ("pix2struct", "Pix2StructImageProcessor"),
        ("poolformer", "PoolFormerImageProcessor"),
        ("regnet", "ConvNextImageProcessor"),
        ("resnet", "ConvNextImageProcessor"),
        ("sam", "SamImageProcessor"),
        ("segformer", "SegformerImageProcessor"),
        ("swiftformer", "ViTImageProcessor"),
        ("swin", "ViTImageProcessor"),
        ("swin2sr", "Swin2SRImageProcessor"),
        ("swinv2", "ViTImageProcessor"),
        ("table-transformer", "DetrImageProcessor"),
        ("timesformer", "VideoMAEImageProcessor"),
        ("tvlt", "TvltImageProcessor"),
        ("upernet", "SegformerImageProcessor"),
        ("van", "ConvNextImageProcessor"),
        ("videomae", "VideoMAEImageProcessor"),
        ("vilt", "ViltImageProcessor"),
        ("vit", "ViTImageProcessor"),
        ("vit_hybrid", "ViTHybridImageProcessor"),
        ("vit_mae", "ViTImageProcessor"),
        ("vit_msn", "ViTImageProcessor"),
        ("xclip", "CLIPImageProcessor"),
        ("yolos", "YolosImageProcessor"),
    ]
)

IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)


def image_processor_class_from_name(class_name: str):
    """Resolve an image-processor class from its name, or return None."""
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    # Classes registered at runtime via AutoImageProcessor.register.
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the image-processor config dict for a model, or {} if none is found."""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoImageProcessor:
    """Factory class; use `from_pretrained` instead of instantiating directly."""

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the image processor matching `pretrained_model_name_or_path`.

        Resolution order: the image processor config, then a legacy feature
        extractor config, then the model config; trusts remote code only when
        allowed by `trust_remote_code`.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor
        # config and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new (config class -> image processor class) pair."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
89
1
"""Tests for the PyTorch ViT-MSN model."""
import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMSNModelTester:
    """Builds small configs/inputs and runs the shared model checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite entry points for ViT-MSN."""

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
89
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType __lowerCAmelCase = None __lowerCAmelCase = '''<''' if sys.byteorder == '''little''' else '''>''' # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image __lowerCAmelCase = [ np.dtype('''|b1'''), np.dtype('''|u1'''), np.dtype('''<u2'''), np.dtype('''>u2'''), np.dtype('''<i2'''), np.dtype('''>i2'''), np.dtype('''<u4'''), np.dtype('''>u4'''), np.dtype('''<i4'''), np.dtype('''>i4'''), np.dtype('''<f4'''), np.dtype('''>f4'''), np.dtype('''<f8'''), np.dtype('''>f8'''), ] @dataclass class __magic_name__ : lowerCAmelCase : bool = True lowerCAmelCase : Optional[str] = None # Automatically constructed lowerCAmelCase : ClassVar[str] = "PIL.Image.Image" lowerCAmelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} ) lowerCAmelCase : str = field(default='Image' , init=_UpperCamelCase , repr=_UpperCamelCase ) def __call__( self : Union[str, Any] ): return self.pa_type def __lowercase ( self : Any ,_UpperCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ): if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' 
) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ): _a : Optional[Any] = np.array(_UpperCAmelCase ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ): return {"path": value, "bytes": None} elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ): return {"path": None, "bytes": value} elif isinstance(_UpperCAmelCase ,np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(_UpperCAmelCase ) elif isinstance(_UpperCAmelCase ,PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(_UpperCAmelCase ) elif value.get('path' ) is not None and os.path.isfile(value['path'] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('path' )} elif value.get('bytes' ) is not None or value.get('path' ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('bytes' ), "path": value.get('path' )} else: raise ValueError( F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : dict ,_UpperCAmelCase : Optional[int]=None ): if not self.decode: raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support decoding images, please install \'Pillow\'.' 
) if token_per_repo_id is None: _a : Dict = {} _a , _a : str = value['path'], value['bytes'] if bytes_ is None: if path is None: raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" ) else: if is_local_path(_UpperCAmelCase ): _a : Any = PIL.Image.open(_UpperCAmelCase ) else: _a : List[Any] = path.split('::' )[-1] try: _a : str = string_to_dict(_UpperCAmelCase ,config.HUB_DATASETS_URL )['repo_id'] _a : Optional[Any] = token_per_repo_id.get(_UpperCAmelCase ) except ValueError: _a : int = None with xopen(_UpperCAmelCase ,'rb' ,use_auth_token=_UpperCAmelCase ) as f: _a : Tuple = BytesIO(f.read() ) _a : Union[str, Any] = PIL.Image.open(bytes_ ) else: _a : Optional[int] = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def __lowercase ( self : int ): from .features import Value return ( self if self.decode else { "bytes": Value('binary' ), "path": Value('string' ), } ) def __lowercase ( self : str ,_UpperCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ): if pa.types.is_string(storage.type ): _a : Union[str, Any] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() ) _a : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] ,['bytes', 'path'] ,mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): _a : List[str] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() ) _a : Any = pa.StructArray.from_arrays([storage, path_array] ,['bytes', 'path'] ,mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('bytes' ) >= 0: _a : Union[str, Any] = storage.field('bytes' ) else: _a : Tuple = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() ) if storage.type.get_field_index('path' ) >= 0: _a : Union[str, Any] = storage.field('path' ) else: _a : Dict = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() ) _a : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] 
,['bytes', 'path'] ,mask=storage.is_null() ) elif pa.types.is_list(storage.type ): _a : List[str] = pa.array( [encode_np_array(np.array(_UpperCAmelCase ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,) _a : int = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() ) _a : Optional[Any] = pa.StructArray.from_arrays( [bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() ) return array_cast(_UpperCAmelCase ,self.pa_type ) def __lowercase ( self : Dict ,_UpperCAmelCase : pa.StructArray ): @no_op_if_value_is_null def path_to_bytes(_UpperCAmelCase : Tuple ): with xopen(_UpperCAmelCase ,'rb' ) as f: _a : int = f.read() return bytes_ _a : Any = pa.array( [ (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None for x in storage.to_pylist() ] ,type=pa.binary() ,) _a : Optional[Any] = pa.array( [os.path.basename(_UpperCAmelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] ,type=pa.string() ,) _a : Dict = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() ) return array_cast(_UpperCAmelCase ,self.pa_type ) def __lowerCamelCase ( ) -> List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' 
) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() _a : Dict = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def __lowerCamelCase ( lowerCAmelCase_ ) -> bytes: _a : Optional[int] = BytesIO() if image.format in list_image_compression_formats(): _a : Optional[Any] = image.format else: _a : str = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF' image.save(lowerCAmelCase_ , format=lowerCAmelCase_ ) return buffer.getvalue() def __lowerCamelCase ( lowerCAmelCase_ ) -> dict: if hasattr(lowerCAmelCase_ , 'filename' ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )} def __lowerCamelCase ( lowerCAmelCase_ ) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' ) _a : List[Any] = array.dtype _a : Optional[int] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER _a : Union[str, Any] = dtype.kind _a : Union[str, Any] = dtype.itemsize _a : List[Any] = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: _a : Optional[int] = np.dtype('|u1' ) if dtype_kind not in ["u", "i"]: raise TypeError( f"""Unsupported array dtype {dtype} for image encoding. 
Only {dest_dtype} is supported for multi-channel arrays.""" ) if dtype is not dest_dtype: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: _a : Union[str, Any] = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: _a : str = dtype_byteorder + dtype_kind + str(lowerCAmelCase_ ) _a : List[Any] = np.dtype(lowerCAmelCase_ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" ) _a : Union[str, Any] = PIL.Image.fromarray(array.astype(lowerCAmelCase_ ) ) return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )} def __lowerCamelCase ( lowerCAmelCase_ ) -> List[dict]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' ) if objs: _a , _a : Optional[Any] = first_non_null_value(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(lowerCAmelCase_ , np.ndarray ): _a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ ) return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs] elif isinstance(lowerCAmelCase_ , PIL.Image.Image ): _a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ ) return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs] else: return objs else: return objs
89
1
'''Multi-ControlNet wrapper: runs several ControlNet models side by side and
sums their residuals before they reach the UNet (diffusers `MultiControlNetModel`).'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


__lowerCAmelCase = logging.get_logger(__name__)


class __magic_name__ ( _UpperCamelCase ):
    # NOTE(review): the base-class name was mangled to `_UpperCamelCase`
    # (undefined here) -- presumably ModelMixin, imported above; confirm via VCS.

    def __init__( self : Dict ,_UpperCAmelCase : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
        # _UpperCAmelCase: the collection of ControlNetModel instances to drive jointly.
        super().__init__()
        # NOTE(review): bound to the throw-away local `_a`, yet the methods
        # below read `self.nets` -- this was almost certainly
        # `self.nets = nn.ModuleList(...)` before the identifier mangling.
        _a : int = nn.ModuleList(_UpperCAmelCase )

    # Forward pass: run every controlnet on its conditioning image and merge
    # the per-controlnet residuals (element-wise sum).
    def __lowercase ( self : int ,_UpperCAmelCase : torch.FloatTensor ,_UpperCAmelCase : Union[torch.Tensor, float, int] ,_UpperCAmelCase : torch.Tensor ,_UpperCAmelCase : List[torch.tensor] ,_UpperCAmelCase : List[float] ,_UpperCAmelCase : Optional[torch.Tensor] = None ,_UpperCAmelCase : Optional[torch.Tensor] = None ,_UpperCAmelCase : Optional[torch.Tensor] = None ,_UpperCAmelCase : Optional[Dict[str, Any]] = None ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : bool = True ,):
        # NOTE(review): every parameter is named `_UpperCAmelCase`, which is a
        # SyntaxError (duplicate argument names).  The mangling destroyed the
        # original signature (sample, timestep, encoder_hidden_states, the
        # conditioning-image list, per-net scales, ...) -- recover from VCS.
        for i, (image, scale, controlnet) in enumerate(zip(_UpperCAmelCase ,_UpperCAmelCase ,self.nets ) ):
            # NOTE(review): result bound to `_a` but read below as
            # `down_samples, mid_sample` -- another mangling casualty.
            _a , _a : Optional[Any] = controlnet( _UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,)

            # merge samples
            if i == 0:
                # First controlnet seeds the accumulators.
                _a , _a : int = down_samples, mid_sample
            else:
                # Running element-wise sum of the down-block residual lists.
                _a : Union[str, Any] = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(_UpperCAmelCase ,_UpperCAmelCase )
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    # Save each wrapped controlnet under save_directory, save_directory_1, ...
    # so that from_pretrained (below) can rediscover them by suffix.
    def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Union[str, os.PathLike] ,_UpperCAmelCase : bool = True ,_UpperCAmelCase : Callable = None ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : Optional[str] = None ,):
        # NOTE(review): duplicate parameter names again (SyntaxError), and the
        # body reads `save_directory` / mutates `idx` / `model_path_to_save`
        # that were assigned to `_a` -- original names lost to mangling.
        _a : Union[str, Any] = 0
        _a : Union[str, Any] = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                _UpperCAmelCase ,is_main_process=_UpperCAmelCase ,save_function=_UpperCAmelCase ,safe_serialization=_UpperCAmelCase ,variant=_UpperCAmelCase ,)

            idx += 1
            # Next controlnet goes into the sibling directory with suffix _<idx>.
            _a : str = model_path_to_save + F"""_{idx}"""

    @classmethod
    def __lowercase ( cls : str ,_UpperCAmelCase : Optional[Union[str, os.PathLike]] ,**_UpperCAmelCase : Optional[Any] ):
        _a : Any = 0
        _a : int = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        _a : str = pretrained_model_path
        while os.path.isdir(_UpperCAmelCase ):
            _a : int = ControlNetModel.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
            controlnets.append(_UpperCAmelCase )
            idx += 1
            _a : int = pretrained_model_path + F"""_{idx}"""

        logger.info(F"""{len(_UpperCAmelCase )} controlnets loaded from {pretrained_model_path}.""" )

        if len(_UpperCAmelCase ) == 0:
            raise ValueError(
                F"""No ControlNets found under {os.path.dirname(_UpperCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.""" )

        return cls(_UpperCAmelCase )
89
"""Quine-McCluskey algorithm for minimising a boolean function.

Given the number of variables and the decimal minterms, this module computes
the prime implicants of the function and then selects the essential prime
implicants from a prime-implicant chart.

Fixes over the previous revision: every function had been renamed to the
same `__lowerCamelCase` (so internal calls to `compare_string`, `check`, ...
raised NameError), parameters shared the duplicated name `lowerCAmelCase_`
(a SyntaxError), and `main` converted the undefined name `lowerCAmelCase_`
instead of the loop variable `x`.  Identifiers are restored to match the
call sites; the algorithmic logic is unchanged.
"""
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Try to merge two equal-length bit strings.

    Returns the merged string with the differing position replaced by
    ``'_'``, or ``False`` when the strings differ in more than one position
    and therefore cannot be combined.

    >>> compare_string('0010', '0110')
    '0_10'
    >>> compare_string('0110', '1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Iteratively combine terms until a fixed point and collect survivors.

    >>> check(['0.00.01.5'])
    ['0.00.01.5']
    """
    pi = []
    while True:
        # '$' marks terms untouched by any comparison this round.
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to a bit string of ``no_of_variable`` digits.

    >>> decimal_to_binary(3, [1.5])
    ['0.00.01.5']
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            # Prepend the lowest remaining bit, then shift right.
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when the two strings differ in exactly ``count`` positions.

    >>> is_for_table('__1', '011', 2)
    True
    >>> is_for_table('01_', '001', 1)
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select essential prime implicants from the chart (mutates ``chart``).

    First takes every implicant that is the sole cover of some minterm
    column, then greedily takes the implicant covering the most remaining
    columns until the chart is exhausted.

    >>> selection([[1]], ['0.00.01.5'])
    ['0.00.01.5']
    """
    temp = []
    select = [0] * len(chart)
    # A column covered by exactly one implicant makes that implicant essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    # Zero out every column this essential implicant covers.
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy cover of whatever columns remain.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the 0/1 coverage chart: rows are implicants, columns minterms.

    >>> prime_implicant_chart(['0.00.01.5'], ['0.00.01.5'])
    [[1]]
    """
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    """Read the variable count and minterms from stdin and print the result."""
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        # was `float(lowerCAmelCase_)` -- an undefined name; convert each token.
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
89
1
'''simple docstring''' import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger('''transformers.models.encodec''') __lowerCAmelCase = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } __lowerCAmelCase = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': 
'''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } __lowerCAmelCase = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', '''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } __lowerCAmelCase = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': 
'''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } __lowerCAmelCase = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': 
'''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } __lowerCAmelCase = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } __lowerCAmelCase = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } __lowerCAmelCase = [] __lowerCAmelCase = [] def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: for attribute in key.split('.' ): _a : Optional[int] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) if weight_type is not None: _a : int = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape else: _a : Any = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": _a : Any = value elif weight_type == "weight_g": _a : Tuple = value elif weight_type == "weight_v": _a : Tuple = value elif weight_type == "bias": _a : Union[str, Any] = value elif weight_type == "running_mean": _a : int = value elif weight_type == "running_var": _a : Union[str, Any] = value elif weight_type == "num_batches_tracked": _a : Tuple = value elif weight_type == "weight_ih_l0": _a : Tuple = value elif weight_type == "weight_hh_l0": _a : int = value elif weight_type == "bias_ih_l0": _a : str = value elif weight_type == "bias_hh_l0": _a : Optional[int] = value elif weight_type == "weight_ih_l1": _a : Any = value elif weight_type == "weight_hh_l1": _a : Optional[Any] = value elif weight_type == "bias_ih_l1": _a : Any = value elif weight_type == "bias_hh_l1": _a : str = value else: _a : Optional[int] = value logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Union[str, Any]: for key in ignore_keys: if key.endswith('.*' ): if name.startswith(key[:-1] ): return True elif ".*." in key: _a , _a : Union[str, Any] = key.split('.*.' ) if prefix in name and suffix in name: return True elif key in name: return True return False def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict: _a : Union[str, Any] = [] if model_name == "encodec_24khz" or "encodec_32khz": _a : Tuple = MAPPING_24K elif model_name == "encodec_48khz": _a : str = MAPPING_48K else: raise ValueError(f"""Unsupported model: {model_name}""" ) for name, value in orig_dict.items(): if should_ignore(lowerCAmelCase_ , lowerCAmelCase_ ): logger.info(f"""{name} was ignored""" ) continue _a : List[Any] = False for key, mapped_key in MAPPING.items(): if "*" in key: _a , _a : Optional[Any] = key.split('.*.' 
) if prefix in name and suffix in name: _a : Tuple = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('embed' ) and name.endswith('embed_avg' ): continue _a : Union[str, Any] = True if "*" in mapped_key: _a : List[Any] = name.split(lowerCAmelCase_ )[0].split('.' )[-2] _a : Any = mapped_key.replace('*' , lowerCAmelCase_ ) if "weight_g" in name: _a : str = 'weight_g' elif "weight_v" in name: _a : Optional[int] = 'weight_v' elif "weight_ih_l0" in name: _a : List[str] = 'weight_ih_l0' elif "weight_hh_l0" in name: _a : Dict = 'weight_hh_l0' elif "bias_ih_l0" in name: _a : Tuple = 'bias_ih_l0' elif "bias_hh_l0" in name: _a : Optional[Any] = 'bias_hh_l0' elif "weight_ih_l1" in name: _a : Dict = 'weight_ih_l1' elif "weight_hh_l1" in name: _a : Dict = 'weight_hh_l1' elif "bias_ih_l1" in name: _a : Optional[Any] = 'bias_ih_l1' elif "bias_hh_l1" in name: _a : Optional[Any] = 'bias_hh_l1' elif "bias" in name: _a : Tuple = 'bias' elif "weight" in name: _a : List[str] = 'weight' elif "running_mean" in name: _a : Optional[Any] = 'running_mean' elif "running_var" in name: _a : int = 'running_var' elif "num_batches_tracked" in name: _a : List[Any] = 'num_batches_tracked' else: _a : List[str] = None set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) continue if not is_used: unused_weights.append(lowerCAmelCase_ ) logger.warning(f"""Unused weights: {unused_weights}""" ) @torch.no_grad() def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Union[str, Any]: if config_path is not None: _a : Optional[Any] = EncodecConfig.from_pretrained(lowerCAmelCase_ ) else: _a : int = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": _a : Dict = [8, 5, 4, 4] _a : Optional[int] = [2.2] _a : Optional[Any] = 64 _a : Any = 32000 _a : Dict = 2048 _a : Dict = 
False _a : List[Any] = False _a : List[Any] = False elif model_name == "encodec_48khz": _a : Optional[int] = [8, 5, 4, 2] _a : Optional[Any] = [3.0, 6.0, 12.0, 24.0] _a : str = 48000 _a : Optional[int] = 2 _a : List[str] = False _a : List[Any] = 'time_group_norm' _a : Optional[int] = True _a : str = 1.0 _a : List[Any] = 0.01 else: raise ValueError(f"""Unknown model name: {model_name}""" ) _a : Optional[int] = EncodecModel(lowerCAmelCase_ ) _a : List[Any] = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(lowerCAmelCase_ ) _a : Tuple = torch.load(lowerCAmelCase_ ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights _a : Union[str, Any] = original_checkpoint['best_state'] recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) if repo_id: print('Pushing to the hub...' ) feature_extractor.push_to_hub(lowerCAmelCase_ ) model.push_to_hub(lowerCAmelCase_ ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. 
Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) __lowerCAmelCase = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
89
'''simple docstring''' # Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase = { '''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''], '''tokenization_cpmant''': ['''CpmAntTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CpmAntForCausalLM''', '''CpmAntModel''', '''CpmAntPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
89
1
'''Configuration for EfficientFormer models (`model_type = "efficientformer"`).'''
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCAmelCase = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
__lowerCAmelCase = {
    '''snap-research/efficientformer-l1-300''': (
        '''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
    ),
}


class __magic_name__ ( _UpperCamelCase ):
    # NOTE(review): the base-class name was mangled to `_UpperCamelCase`
    # (undefined here) -- presumably PretrainedConfig, imported above.

    # model_type identifier used by AutoConfig.
    lowerCAmelCase : List[str] = 'efficientformer'

    def __init__( self : Optional[int] ,_UpperCAmelCase : List[int] = [3, 2, 6, 4] ,_UpperCAmelCase : List[int] = [48, 96, 224, 448] ,_UpperCAmelCase : List[bool] = [True, True, True, True] ,_UpperCAmelCase : int = 448 ,_UpperCAmelCase : int = 32 ,_UpperCAmelCase : int = 4 ,_UpperCAmelCase : int = 7 ,_UpperCAmelCase : int = 5 ,_UpperCAmelCase : int = 8 ,_UpperCAmelCase : int = 4 ,_UpperCAmelCase : float = 0.0 ,_UpperCAmelCase : int = 16 ,_UpperCAmelCase : int = 3 ,_UpperCAmelCase : int = 3 ,_UpperCAmelCase : int = 3 ,_UpperCAmelCase : int = 2 ,_UpperCAmelCase : int = 1 ,_UpperCAmelCase : float = 0.0 ,_UpperCAmelCase : int = 1 ,_UpperCAmelCase : bool = True ,_UpperCAmelCase : bool = True ,_UpperCAmelCase : float = 1E-5 ,_UpperCAmelCase : str = "gelu" ,_UpperCAmelCase : float = 0.02 ,_UpperCAmelCase : float = 1E-12 ,_UpperCAmelCase : int = 224 ,_UpperCAmelCase : float = 1E-05 ,**_UpperCAmelCase : Union[str, Any] ,):
        # NOTE(review): every parameter was mangled to the duplicated name
        # `_UpperCAmelCase` (a SyntaxError), yet the body reads the readable
        # names (hidden_act, depths, ...) and binds each to the throw-away
        # local `_a` instead of `self.<attr>`.  The intended original was
        # `self.hidden_act = hidden_act`, etc.  Also note the mutable list
        # defaults in the signature -- defensible only if the originals
        # copied them.  Recover the real signature from VCS.
        super().__init__(**_UpperCAmelCase )

        _a : Optional[Any] = hidden_act
        _a : int = hidden_dropout_prob
        _a : Optional[int] = hidden_sizes
        _a : int = num_hidden_layers
        _a : Optional[Any] = num_attention_heads
        _a : Union[str, Any] = initializer_range
        _a : List[str] = layer_norm_eps
        _a : List[str] = patch_size
        _a : Tuple = num_channels
        _a : Optional[Any] = depths
        _a : str = mlp_expansion_ratio
        _a : Dict = downsamples
        _a : List[str] = dim
        _a : str = key_dim
        _a : str = attention_ratio
        _a : int = resolution
        _a : List[Any] = pool_size
        _a : Any = downsample_patch_size
        _a : str = downsample_stride
        _a : Tuple = downsample_pad
        _a : List[str] = drop_path_rate
        _a : List[Any] = num_metaad_blocks
        _a : str = distillation
        _a : Union[str, Any] = use_layer_scale
        _a : Any = layer_scale_init_value
        _a : List[Any] = image_size
        _a : List[Any] = batch_norm_eps
89
'''simple docstring''' import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __magic_name__ ( _UpperCamelCase , unittest.TestCase ): lowerCAmelCase : str = LayoutLMTokenizer lowerCAmelCase : Tuple = LayoutLMTokenizerFast lowerCAmelCase : List[Any] = True lowerCAmelCase : int = True def __lowercase ( self : Dict ): super().setUp() _a : int = [ '[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] _a : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __lowercase ( self : Dict ,**_UpperCAmelCase : List[str] ): return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**_UpperCAmelCase ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Tuple ): _a : Optional[int] = 'UNwant\u00E9d,running' _a : List[Any] = 'unwanted, running' return input_text, output_text def __lowercase ( self : Optional[int] ): _a : Optional[Any] = self.tokenizer_class(self.vocab_file ) _a : Optional[Any] = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(_UpperCAmelCase ,['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) ,[7, 4, 5, 10, 8, 9] ) def __lowercase ( self : Optional[int] ): pass
89
1
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX

# NOTE(review): the original file's assignment targets were mangled to `_a`
# while use sites kept the real names (beta, regressor, safe, ...), so the
# code raised NameError.  Local names below are reconstructed from those use
# sites; control flow and literals are preserved.


def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Ordinary least squares forecast of the next user count.

    Builds the design matrix [1, date, match_count] per sample, solves the
    normal equations, and evaluates the fitted plane at the test point.
    """
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    # beta = (X^T X)^-1 X^T y  (normal equations; assumes X^T X invertible)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """One-step SARIMAX forecast of user count with match count as exog."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)  # weekly seasonality
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    # disp=False silences the optimizer's convergence output.
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """RBF-kernel SVR forecast from (match, date) features."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user: list) -> float:
    """Return a lower outlier limit: Q1 - 0.1 * IQR (sorts the input in place)."""
    train_user.sort()
    q25 = np.percentile(train_user, 25)
    q75 = np.percentile(train_user, 75)
    iqr = q75 - q25
    low_lim = q25 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Majority vote: do the forecasts agree (within 0.1) with the actual value?"""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            # Preserved from the original: overwrites rather than increments.
            safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=['''total_user''', '''total_even''', '''days''']
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    # NOTE(review): tst_user is a one-element list while data_safety_checker
    # compares it as a scalar — preserved from the original; verify upstream.
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    # Fixed: original printed the literal '{not_str}' (missing f-string prefix).
    print(f"Today's data is {not_str}safe.")
89
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


# NOTE(review): identifiers are machine-mangled; structure matches the HF
# Conditional-DETR configuration module.  Assignment targets appear as a
# throwaway `_a` while later code reads the original attribute names.
__lowerCAmelCase = logging.get_logger(__name__)

# Pretrained checkpoint name -> hosted config URL.
# NOTE(review): rebinds the same mangled name as the logger above.
__lowerCAmelCase = {
    '''microsoft/conditional-detr-resnet-50''': (
        '''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
    ),
}


class __magic_name__ ( _UpperCamelCase ):
    # Configuration for Conditional DETR (object detection); base class is
    # presumably `PretrainedConfig` — TODO confirm.

    # HF `model_type` key.
    lowerCAmelCase : Any = 'conditional_detr'
    # Keys ignored at inference time (cache tensors).
    lowerCAmelCase : List[str] = ['past_key_values']
    # Generic-name -> model-specific-name attribute aliases.
    lowerCAmelCase : Optional[int] = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    # NOTE(review): all parameters share the mangled name `_UpperCAmelCase`
    # (invalid as written); the right-hand sides of the assignments below
    # reveal the intended parameter names.
    def __init__( self : Optional[int] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[Any]=3 ,_UpperCAmelCase : List[Any]=300 ,_UpperCAmelCase : Dict=6 ,_UpperCAmelCase : List[str]=2048 ,_UpperCAmelCase : Optional[int]=8 ,_UpperCAmelCase : List[Any]=6 ,_UpperCAmelCase : Optional[int]=2048 ,_UpperCAmelCase : Dict=8 ,_UpperCAmelCase : int=0.0 ,_UpperCAmelCase : Optional[Any]=0.0 ,_UpperCAmelCase : Optional[Any]=True ,_UpperCAmelCase : str="relu" ,_UpperCAmelCase : Tuple=256 ,_UpperCAmelCase : Optional[int]=0.1 ,_UpperCAmelCase : str=0.0 ,_UpperCAmelCase : Optional[int]=0.0 ,_UpperCAmelCase : Union[str, Any]=0.02 ,_UpperCAmelCase : List[str]=1.0 ,_UpperCAmelCase : Any=False ,_UpperCAmelCase : int="sine" ,_UpperCAmelCase : List[str]="resnet50" ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : str=False ,_UpperCAmelCase : str=2 ,_UpperCAmelCase : int=5 ,_UpperCAmelCase : Optional[int]=2 ,_UpperCAmelCase : str=1 ,_UpperCAmelCase : Union[str, Any]=1 ,_UpperCAmelCase : List[str]=2 ,_UpperCAmelCase : Union[str, Any]=5 ,_UpperCAmelCase : List[Any]=2 ,_UpperCAmelCase : Optional[int]=0.25 ,**_UpperCAmelCase : Tuple ,):
        # A custom backbone config and a timm backbone are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                # Default to a ResNet backbone exposing its final stage.
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                _a : Optional[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
                # Re-hydrate a dict-form backbone config via the auto mapping.
                _a : str = backbone_config.get('model_type' )
                _a : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
                _a : List[Any] = config_class.from_dict(_UpperCAmelCase )
        # Model hyper-parameters (targets mangled to `_a`).
        _a : Tuple = use_timm_backbone
        _a : Union[str, Any] = backbone_config
        _a : List[Any] = num_channels
        _a : Union[str, Any] = num_queries
        _a : Optional[Any] = d_model
        _a : Tuple = encoder_ffn_dim
        _a : Dict = encoder_layers
        _a : List[str] = encoder_attention_heads
        _a : Union[str, Any] = decoder_ffn_dim
        _a : Optional[int] = decoder_layers
        _a : int = decoder_attention_heads
        _a : Optional[int] = dropout
        _a : Tuple = attention_dropout
        _a : List[Any] = activation_dropout
        _a : str = activation_function
        _a : Optional[Any] = init_std
        _a : Union[str, Any] = init_xavier_std
        _a : List[Any] = encoder_layerdrop
        _a : List[Any] = decoder_layerdrop
        # NOTE(review): `encoder_layers` is assigned twice in the original.
        _a : Dict = encoder_layers
        _a : List[Any] = auxiliary_loss
        _a : Optional[int] = position_embedding_type
        _a : List[Any] = backbone
        _a : Optional[int] = use_pretrained_backbone
        _a : Optional[int] = dilation
        # Hungarian matcher
        _a : Tuple = class_cost
        _a : str = bbox_cost
        _a : Any = giou_cost
        # Loss coefficients
        _a : Tuple = mask_loss_coefficient
        _a : Dict = dice_loss_coefficient
        _a : Tuple = cls_loss_coefficient
        _a : Any = bbox_loss_coefficient
        _a : Dict = giou_loss_coefficient
        _a : Union[str, Any] = focal_alpha
        super().__init__(is_encoder_decoder=_UpperCAmelCase ,**_UpperCAmelCase )

    @property
    def __lowercase ( self : Dict ):
        # num_attention_heads alias (see attribute_map above).
        return self.encoder_attention_heads

    @property
    def __lowercase ( self : str ):
        # hidden_size alias (see attribute_map above).
        return self.d_model

    def __lowercase ( self : int ):
        # Serialize to a plain dict, nesting the backbone config.
        _a : List[str] = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            _a : Dict = self.backbone_config.to_dict()
        _a : Union[str, Any] = self.__class__.model_type
        return output


class __magic_name__ ( _UpperCamelCase ):
    # ONNX export configuration for Conditional DETR.

    # Minimum torch version with the required ONNX export support.
    lowerCAmelCase : str = version.parse('1.11' )

    @property
    def __lowercase ( self : Dict ):
        # Input names and their dynamic axes for ONNX export.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ] )

    @property
    def __lowercase ( self : Any ):
        # Absolute tolerance used when validating exported outputs.
        return 1E-5

    @property
    def __lowercase ( self : List[Any] ):
        # Default ONNX opset.
        return 12
89
1
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
89
'''simple docstring'''
import inspect
import unittest

from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
    from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


# NOTE(review): assignment targets throughout this file are machine-mangled
# (`_a`); subsequent lines reference the original local names, which `_a`
# never binds.  Documented as-is.
class __magic_name__ :
    # Model-tester helper: builds tiny configs/inputs for ConvNextV2 tests.
    def __init__( self : List[str] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=13 ,_UpperCAmelCase : Any=32 ,_UpperCAmelCase : Union[str, Any]=3 ,_UpperCAmelCase : Optional[int]=4 ,_UpperCAmelCase : Optional[Any]=[10, 20, 30, 40] ,_UpperCAmelCase : Tuple=[2, 2, 3, 2] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=37 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Optional[Any]=10 ,_UpperCAmelCase : Tuple=0.02 ,_UpperCAmelCase : Any=["stage2", "stage3", "stage4"] ,_UpperCAmelCase : Any=[2, 3, 4] ,_UpperCAmelCase : Tuple=None ,):
        _a : Optional[Any] = parent
        _a : List[Any] = batch_size
        _a : str = image_size
        _a : Union[str, Any] = num_channels
        _a : List[Any] = num_stages
        _a : Dict = hidden_sizes
        _a : int = depths
        _a : Tuple = is_training
        _a : List[str] = use_labels
        _a : Dict = intermediate_size
        _a : int = hidden_act
        _a : int = num_labels
        _a : Any = initializer_range
        _a : Tuple = out_features
        _a : int = out_indices
        _a : List[Any] = scope

    def __lowercase ( self : Dict ):
        # Random pixel inputs (+ optional labels) and a tiny config.
        _a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _a : Union[str, Any] = None
        if self.use_labels:
            _a : Tuple = ids_tensor([self.batch_size] ,self.num_labels )
        _a : str = self.get_config()
        return config, pixel_values, labels

    def __lowercase ( self : Any ):
        return ConvNextVaConfig(
            num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)

    def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ):
        # Base model: check the 32x-downsampled last hidden state shape.
        _a : Optional[Any] = ConvNextVaModel(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        _a : Any = model(_UpperCAmelCase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)

    def __lowercase ( self : Tuple ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ):
        # Classification head: logits shaped (batch, num_labels).
        _a : List[Any] = ConvNextVaForImageClassification(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        _a : List[str] = model(_UpperCAmelCase ,labels=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def __lowercase ( self : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ):
        # Backbone: feature maps / channels, with and without out_features.
        _a : Optional[int] = ConvNextVaBackbone(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        _a : Dict = model(_UpperCAmelCase )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
        self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        _a : Tuple = None
        _a : List[Any] = ConvNextVaBackbone(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        _a : List[str] = model(_UpperCAmelCase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) ,1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) ,1 )
        self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )

    def __lowercase ( self : Optional[Any] ):
        # Common-test inputs without labels.
        _a : Any = self.prepare_config_and_inputs()
        _a , _a , _a : Union[str, Any] = config_and_inputs
        _a : Any = {'pixel_values': pixel_values}
        return config, inputs_dict

    def __lowercase ( self : str ):
        # Common-test inputs with labels.
        _a : Tuple = self.prepare_config_and_inputs()
        _a , _a , _a : Tuple = config_and_inputs
        _a : List[Any] = {'pixel_values': pixel_values, 'labels': labels}
        return config, inputs_dict


@require_torch
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    # Main ConvNextV2 model test-suite (ModelTesterMixin + PipelineTesterMixin,
    # presumably — the base names are mangled).
    lowerCAmelCase : str = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    lowerCAmelCase : str = (
        {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    lowerCAmelCase : int = False
    lowerCAmelCase : str = False
    lowerCAmelCase : Optional[Any] = False
    lowerCAmelCase : List[str] = False
    lowerCAmelCase : Optional[int] = False

    def __lowercase ( self : List[Any] ):
        _a : str = ConvNextVaModelTester(self )
        _a : Tuple = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 )

    def __lowercase ( self : Optional[Any] ):
        # Exercise the standard config round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __lowercase ( self : str ):
        return

    @unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
    def __lowercase ( self : List[Any] ):
        pass

    @unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
    def __lowercase ( self : Optional[int] ):
        pass

    @unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
    def __lowercase ( self : Any ):
        pass

    def __lowercase ( self : List[str] ):
        # Training smoke test: forward + backward on each trainable class.
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            _a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
            _a : Any = True
            if model_class.__name__ in [
                *get_values(_UpperCAmelCase ),
                *get_values(_UpperCAmelCase ),
            ]:
                continue
            _a : Optional[Any] = model_class(_UpperCAmelCase )
            model.to(_UpperCAmelCase )
            model.train()
            _a : str = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
            _a : Optional[int] = model(**_UpperCAmelCase ).loss
            loss.backward()

    def __lowercase ( self : str ):
        # Same as above but with gradient checkpointing enabled.
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            _a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
            _a : Optional[int] = False
            _a : Tuple = True
            if (
                model_class.__name__
                in [*get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            _a : Tuple = model_class(_UpperCAmelCase )
            model.to(_UpperCAmelCase )
            model.gradient_checkpointing_enable()
            model.train()
            _a : Any = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
            _a : List[Any] = model(**_UpperCAmelCase ).loss
            loss.backward()

    def __lowercase ( self : List[Any] ):
        # forward() must take pixel_values as its first argument.
        _a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _a : int = model_class(_UpperCAmelCase )
            _a : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _a : Dict = [*signature.parameters.keys()]
            _a : int = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )

    def __lowercase ( self : int ):
        _a : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )

    def __lowercase ( self : Any ):
        # Hidden-state count and spatial shape, via arg and via config.
        def check_hidden_states_output(_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Dict ):
            _a : Union[str, Any] = model_class(_UpperCAmelCase )
            model.to(_UpperCAmelCase )
            model.eval()
            with torch.no_grad():
                _a : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
            _a : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            _a : str = self.model_tester.num_stages
            self.assertEqual(len(_UpperCAmelCase ) ,expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)

        _a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _a : int = True
            check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _a : Optional[Any] = True
            check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )

    def __lowercase ( self : List[Any] ):
        _a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )

    @slow
    def __lowercase ( self : int ):
        # Load the first published checkpoint end-to-end.
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _a : Any = ConvNextVaModel.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )


def __lowerCamelCase ( ) -> List[Any]:
    # Shared test fixture image (two cats on a couch, COCO sample).
    _a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
    # Slow integration test against the published tiny checkpoint.
    @cached_property
    def __lowercase ( self : Optional[Any] ):
        return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None

    @slow
    def __lowercase ( self : Any ):
        _a : List[str] = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(_UpperCAmelCase )
        _a : Optional[int] = self.default_image_processor
        _a : str = prepare_img()
        _a : str = preprocessor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            _a : Dict = model(**_UpperCAmelCase )
        # verify the logits
        _a : Optional[Any] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,_UpperCAmelCase )
        _a : Optional[Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(_UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_UpperCAmelCase ,atol=1E-4 ) )
89
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __lowerCAmelCase = { '''configuration_swiftformer''': [ '''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwiftFormerConfig''', '''SwiftFormerOnnxConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SwiftFormerForImageClassification''', '''SwiftFormerModel''', '''SwiftFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
89
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase = { '''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LiltForQuestionAnswering''', '''LiltForSequenceClassification''', '''LiltForTokenClassification''', '''LiltModel''', '''LiltPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
89
1
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch

from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS


logging.basicConfig(level=logging.DEBUG)
__lowerCAmelCase = logging.getLogger()


# NOTE(review): assignment targets here are machine-mangled (`_a`,
# `__lowerCAmelCase`); later statements reference the original names
# (stream_handler, logger, T5_TINY, ...), which the mangled targets never bind.
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
    # Dump articles (one per line) into a file; presumably
    # `_dump_articles(path, articles)` judging by the call sites below.
    _a : Any = '\n'.join(lowerCAmelCase_ )
    Path(lowerCAmelCase_ ).open('w' ).writelines(lowerCAmelCase_ )


# Tiny checkpoints used to keep the tests fast/offline-friendly.
__lowerCAmelCase = '''patrickvonplaten/t5-tiny-random'''
__lowerCAmelCase = '''sshleifer/bart-tiny-random'''
__lowerCAmelCase = '''sshleifer/tiny-mbart'''

__lowerCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks


class __magic_name__ ( _UpperCamelCase ):
    # End-to-end CLI tests for run_eval / run_eval_search
    # (base is presumably TestCasePlus — TODO confirm).

    def __lowercase ( self : List[Any] ,_UpperCAmelCase : Optional[int] ):
        # Shared helper: run run_eval's CLI against a one-line source file and
        # assert the output file is produced.
        _a : Tuple = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        _a : Any = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        _a : Union[str, Any] = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(_UpperCAmelCase ,_UpperCAmelCase )
        _a : List[str] = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
        # T5 tiny is a translation model; everything else summarizes.
        _a : Union[str, Any] = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        _a : Union[str, Any] = F"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()
        # Patch argv so run_generate() parses our synthetic CLI invocation.
        with patch.object(_UpperCAmelCase ,'argv' ,_UpperCAmelCase ):
            run_generate()
            assert Path(_UpperCAmelCase ).exists()
            # os.remove(Path(output_file_name))

    def __lowercase ( self : Optional[int] ):
        self.run_eval_tester(_UpperCAmelCase )

    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def __lowercase ( self : List[Any] ,_UpperCAmelCase : Union[str, Any] ):
        self.run_eval_tester(_UpperCAmelCase )

    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : Any ):
        # End-to-end hyper-parameter search over beams / length penalty, with
        # stdout captured and checked for the expected report fields.
        _a : List[str] = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        _a : Tuple = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        _a : Optional[int] = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }
        _a : List[str] = Path(self.get_auto_remove_tmp_dir() )
        _a : List[str] = str(tmp_dir / 'scores.json' )
        _a : List[Any] = str(tmp_dir / 'val.target' )
        _dump_articles(_UpperCAmelCase ,text['en'] )
        _dump_articles(_UpperCAmelCase ,text['de'] )
        _a : Optional[int] = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        _a : str = F"""
            run_eval_search.py
            {model}
            {str(_UpperCAmelCase )}
            {str(_UpperCAmelCase )}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
        with patch.object(_UpperCAmelCase ,'argv' ,_UpperCAmelCase ):
            with CaptureStdout() as cs:
                run_search()
            # Report must mention the searched params, model, and best args;
            # translation tasks report BLEU, others report the ROUGE keys.
            _a : List[Any] = [' num_beams | length_penalty', model, 'Best score args']
            _a : int = ['Info']
            if "translation" in task:
                expected_strings.append('bleu' )
            else:
                expected_strings.extend(_UpperCAmelCase )
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(_UpperCAmelCase ).exists()
            os.remove(Path(_UpperCAmelCase ) )
89
'''simple docstring''' import math def __lowerCamelCase ( lowerCAmelCase_ ) -> bool: _a : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(lowerCAmelCase_ ) def __lowerCamelCase ( lowerCAmelCase_ = 1 / 12345 ) -> int: _a : int = 0 _a : Optional[Any] = 0 _a : int = 3 while True: _a : Tuple = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(lowerCAmelCase_ ): _a : Union[str, Any] = int(lowerCAmelCase_ ) total_partitions += 1 if check_partition_perfect(lowerCAmelCase_ ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(lowerCAmelCase_ ) integer += 1 if __name__ == "__main__": print(f"""{solution() = }""")
89
1
'''simple docstring''' from __future__ import annotations from collections.abc import Sequence from typing import Literal def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str | Literal[False]: _a : Optional[int] = list(lowerCAmelCase_ ) _a : Optional[Any] = list(lowerCAmelCase_ ) _a : Union[str, Any] = 0 for i in range(len(lowerCAmelCase_ ) ): if lista[i] != lista[i]: count += 1 _a : Optional[int] = '_' if count > 1: return False else: return "".join(lowerCAmelCase_ ) def __lowerCamelCase ( lowerCAmelCase_ ) -> list[str]: _a : Optional[int] = [] while True: _a : Any = ['$'] * len(lowerCAmelCase_ ) _a : List[str] = [] for i in range(len(lowerCAmelCase_ ) ): for j in range(i + 1 , len(lowerCAmelCase_ ) ): _a : Optional[int] = compare_string(binary[i] , binary[j] ) if k is False: _a : Optional[Any] = '*' _a : Optional[Any] = '*' temp.append('X' ) for i in range(len(lowerCAmelCase_ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(lowerCAmelCase_ ) == 0: return pi _a : Any = list(set(lowerCAmelCase_ ) ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]: _a : int = [] for minterm in minterms: _a : Optional[int] = '' for _ in range(lowerCAmelCase_ ): _a : Union[str, Any] = str(minterm % 2 ) + string minterm //= 2 temp.append(lowerCAmelCase_ ) return temp def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> bool: _a : int = list(lowerCAmelCase_ ) _a : Union[str, Any] = list(lowerCAmelCase_ ) _a : str = 0 for i in range(len(lowerCAmelCase_ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]: _a : List[Any] = [] _a : Optional[Any] = [0] * len(lowerCAmelCase_ ) for i in range(len(chart[0] ) ): _a : Union[str, Any] = 0 _a : int = -1 for j in range(len(lowerCAmelCase_ ) ): if chart[j][i] == 1: count += 1 _a : int = j if count == 1: _a : List[Any] = 1 for i in range(len(lowerCAmelCase_ ) ): if select[i] == 1: for j in 
range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(lowerCAmelCase_ ) ): _a : Any = 0 temp.append(prime_implicants[i] ) while True: _a : Union[str, Any] = 0 _a : List[Any] = -1 _a : str = 0 for i in range(len(lowerCAmelCase_ ) ): _a : Union[str, Any] = chart[i].count(1 ) if count_n > max_n: _a : Any = count_n _a : int = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(lowerCAmelCase_ ) ): _a : List[str] = 0 def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> list[list[int]]: _a : int = [[0 for x in range(len(lowerCAmelCase_ ) )] for x in range(len(lowerCAmelCase_ ) )] for i in range(len(lowerCAmelCase_ ) ): _a : str = prime_implicants[i].count('_' ) for j in range(len(lowerCAmelCase_ ) ): if is_for_table(prime_implicants[i] , binary[j] , lowerCAmelCase_ ): _a : Optional[Any] = 1 return chart def __lowerCamelCase ( ) -> None: _a : Optional[int] = int(input('Enter the no. of variables\n' ) ) _a : List[Any] = [ float(lowerCAmelCase_ ) for x in input( 'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split() ] _a : List[str] = decimal_to_binary(lowerCAmelCase_ , lowerCAmelCase_ ) _a : Dict = check(lowerCAmelCase_ ) print('Prime Implicants are:' ) print(lowerCAmelCase_ ) _a : List[Any] = prime_implicant_chart(lowerCAmelCase_ , lowerCAmelCase_ ) _a : int = selection(lowerCAmelCase_ , lowerCAmelCase_ ) print('Essential Prime Implicants are:' ) print(lowerCAmelCase_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
89
'''simple docstring''' import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=1 ) -> Dict: if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Tuple: _a : Any = [] for old_item in old_list: _a : Union[str, Any] = old_item.replace('in_layers.0' , 'norm1' ) _a : Optional[int] = new_item.replace('in_layers.2' , 'conv1' ) _a : str = new_item.replace('out_layers.0' , 'norm2' ) _a : List[str] = new_item.replace('out_layers.3' , 'conv2' ) _a : str = new_item.replace('emb_layers.1' , 'time_emb_proj' ) _a : Tuple = new_item.replace('skip_connection' , 'conv_shortcut' ) _a : Any = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Any: _a : List[str] = [] for old_item in old_list: _a : List[Any] = old_item _a : Optional[int] = new_item.replace('norm.weight' , 'group_norm.weight' ) _a : Optional[Any] = new_item.replace('norm.bias' , 'group_norm.bias' ) _a : Any = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) _a : Optional[Any] = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) _a : Optional[int] = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Any: assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _a : Optional[Any] = old_checkpoint[path] _a : Optional[Any] = old_tensor.shape[0] // 3 _a : Any = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _a : int = old_tensor.shape[0] // config['num_head_channels'] // 3 _a : str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _a , _a , _a : Tuple = old_tensor.split(channels // num_heads , dim=1 ) _a : Dict = query.reshape(lowerCAmelCase_ ) _a : str = key.reshape(lowerCAmelCase_ ) _a : Optional[int] = value.reshape(lowerCAmelCase_ ) for path in paths: _a : Dict = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _a : Any = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) _a : str = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) _a : Union[str, Any] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: _a : int = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _a : List[str] = old_checkpoint[path['old']][:, :, 0] else: _a : Dict = old_checkpoint[path['old']] def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _a : Optional[int] = {} _a : Dict = checkpoint['time_embed.0.weight'] _a : Tuple = checkpoint['time_embed.0.bias'] _a : Union[str, Any] = checkpoint['time_embed.2.weight'] _a : List[str] = checkpoint['time_embed.2.bias'] _a : List[str] = checkpoint['input_blocks.0.0.weight'] _a : Union[str, Any] = checkpoint['input_blocks.0.0.bias'] _a : Optional[int] = checkpoint['out.0.weight'] _a : int = checkpoint['out.0.bias'] _a : List[str] = checkpoint['out.2.weight'] _a : Optional[int] = checkpoint['out.2.bias'] # Retrieves the 
keys for the input blocks only _a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) _a : Dict = { layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } # Retrieves the keys for the middle blocks only _a : List[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) _a : Union[str, Any] = { layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } # Retrieves the keys for the output blocks only _a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) _a : str = { layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key] for layer_id in range(lowerCAmelCase_ ) } for i in range(1 , lowerCAmelCase_ ): _a : List[Any] = (i - 1) // (config['num_res_blocks'] + 1) _a : Optional[int] = (i - 1) % (config['num_res_blocks'] + 1) _a : Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key] _a : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key] if f"""input_blocks.{i}.0.op.weight""" in checkpoint: _a : List[Any] = checkpoint[ f"""input_blocks.{i}.0.op.weight""" ] _a : Union[str, Any] = checkpoint[ f"""input_blocks.{i}.0.op.bias""" ] continue _a : Any = renew_resnet_paths(lowerCAmelCase_ ) _a : List[str] = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""} _a : Optional[Any] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path, resnet_op] , config=lowerCAmelCase_ ) if len(lowerCAmelCase_ ): _a : List[str] = renew_attention_paths(lowerCAmelCase_ ) _a : List[Any] = { 'old': f"""input_blocks.{i}.1""", 'new': 
f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""", } _a : Optional[Any] = { f"""input_blocks.{i}.1.qkv.bias""": { 'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", 'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", 'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""input_blocks.{i}.1.qkv.weight""": { 'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", 'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", 'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ , ) _a : str = middle_blocks[0] _a : Tuple = middle_blocks[1] _a : Any = middle_blocks[2] _a : List[Any] = renew_resnet_paths(lowerCAmelCase_ ) assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ ) _a : Any = renew_resnet_paths(lowerCAmelCase_ ) assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ ) _a : int = renew_attention_paths(lowerCAmelCase_ ) _a : int = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ ) for i in range(lowerCAmelCase_ ): _a : List[str] = i // (config['num_res_blocks'] + 1) _a : Any = i % (config['num_res_blocks'] + 1) _a : Union[str, Any] = [shave_segments(lowerCAmelCase_ , 2 ) for name in 
output_blocks[i]] _a : Optional[Any] = {} for layer in output_block_layers: _a , _a : str = layer.split('.' )[0], shave_segments(lowerCAmelCase_ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(lowerCAmelCase_ ) else: _a : str = [layer_name] if len(lowerCAmelCase_ ) > 1: _a : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key] _a : Optional[Any] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key] _a : Dict = renew_resnet_paths(lowerCAmelCase_ ) _a : str = renew_resnet_paths(lowerCAmelCase_ ) _a : Optional[int] = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""} assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , config=lowerCAmelCase_ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _a : List[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) _a : Tuple = checkpoint[ f"""output_blocks.{i}.{index}.conv.weight""" ] _a : List[str] = checkpoint[ f"""output_blocks.{i}.{index}.conv.bias""" ] # Clear attentions as they have been attributed above. 
if len(lowerCAmelCase_ ) == 2: _a : Union[str, Any] = [] if len(lowerCAmelCase_ ): _a : Tuple = renew_attention_paths(lowerCAmelCase_ ) _a : str = { 'old': f"""output_blocks.{i}.1""", 'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""", } _a : List[Any] = { f"""output_blocks.{i}.1.qkv.bias""": { 'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""", 'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""", 'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""", }, f"""output_blocks.{i}.1.qkv.weight""": { 'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""", 'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""", 'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""", }, } assign_to_checkpoint( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=lowerCAmelCase_ , ) else: _a : List[Any] = renew_resnet_paths(lowerCAmelCase_ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _a : int = '.'.join(['output_blocks', str(lowerCAmelCase_ ), path['old']] ) _a : Union[str, Any] = '.'.join(['up_blocks', str(lowerCAmelCase_ ), 'resnets', str(lowerCAmelCase_ ), path['new']] ) _a : Union[str, Any] = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') __lowerCAmelCase = parser.parse_args() __lowerCAmelCase = 
torch.load(args.checkpoint_path) with open(args.config_file) as f: __lowerCAmelCase = json.loads(f.read()) __lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __lowerCAmelCase = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
89
1
'''simple docstring''' import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class __magic_name__ : def __init__( self : Union[str, Any] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Tuple=2 ,_UpperCAmelCase : Optional[int]=32 ,_UpperCAmelCase : Any=16 ,_UpperCAmelCase : Tuple=3 ,_UpperCAmelCase : Any=True ,_UpperCAmelCase : List[Any]=True ,_UpperCAmelCase : Union[str, Any]=32 ,_UpperCAmelCase : Optional[int]=4 ,_UpperCAmelCase : str=[0, 1, 2, 3] ,_UpperCAmelCase : Optional[Any]=4 ,_UpperCAmelCase : int=37 ,_UpperCAmelCase : int="gelu" ,_UpperCAmelCase : Tuple=0.1 ,_UpperCAmelCase : Dict=0.1 ,_UpperCAmelCase : List[Any]=0.02 ,_UpperCAmelCase : str=3 ,_UpperCAmelCase : Dict=[1, 384, 24, 24] ,_UpperCAmelCase : str=True ,_UpperCAmelCase : Any=None ,): _a : Optional[Any] = parent _a : Any = batch_size _a : str = image_size _a : Any = patch_size _a : Dict = num_channels _a : int = is_training _a : str = use_labels _a : List[Any] = hidden_size _a : Dict = num_hidden_layers _a : int = backbone_out_indices _a : Any = num_attention_heads _a : Dict = intermediate_size _a : Dict = hidden_act _a : Optional[int] = hidden_dropout_prob _a : Dict = attention_probs_dropout_prob _a : Union[str, Any] = initializer_range _a : Tuple = 
num_labels _a : Any = backbone_featmap_shape _a : Any = scope _a : Dict = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) _a : List[str] = (image_size // patch_size) ** 2 _a : Tuple = num_patches + 1 def __lowercase ( self : Optional[Any] ): _a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _a : Dict = None if self.use_labels: _a : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels ) _a : Any = self.get_config() return config, pixel_values, labels def __lowercase ( self : Optional[int] ): _a : Union[str, Any] = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [96, 192, 384, 768], 'num_groups': 2, } return DPTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,backbone_out_indices=self.backbone_out_indices ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,is_hybrid=self.is_hybrid ,backbone_config=_UpperCAmelCase ,backbone_featmap_shape=self.backbone_featmap_shape ,) def __lowercase ( self : Any ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Optional[int] ): _a : Tuple = DPTModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : Optional[int] = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def __lowercase ( self : str ,_UpperCAmelCase : Dict ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Union[str, Any] ): _a : Dict = 
self.num_labels _a : List[Any] = DPTForDepthEstimation(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : Union[str, Any] = model(_UpperCAmelCase ) self.parent.assertEqual(result.predicted_depth.shape ,(self.batch_size, self.image_size, self.image_size) ) def __lowercase ( self : str ,_UpperCAmelCase : str ,_UpperCAmelCase : str ,_UpperCAmelCase : str ): _a : Union[str, Any] = self.num_labels _a : Union[str, Any] = DPTForSemanticSegmentation(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _a : Tuple = model(_UpperCAmelCase ,labels=_UpperCAmelCase ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) ) def __lowercase ( self : Dict ): _a : Dict = self.prepare_config_and_inputs() _a , _a , _a : Any = config_and_inputs _a : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): lowerCAmelCase : List[Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () lowerCAmelCase : Tuple = ( { 'depth-estimation': DPTForDepthEstimation, 'feature-extraction': DPTModel, 'image-segmentation': DPTForSemanticSegmentation, } if is_torch_available() else {} ) lowerCAmelCase : Dict = False lowerCAmelCase : Any = False lowerCAmelCase : Optional[Any] = False def __lowercase ( self : Optional[int] ): _a : Union[str, Any] = DPTModelTester(self ) _a : List[Any] = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 ) def __lowercase ( self : Dict ): self.config_tester.run_common_tests() @unittest.skip(reason='DPT does not use inputs_embeds' ) def __lowercase ( self : List[Any] ): pass def __lowercase ( self : str ): _a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : str = model_class(_UpperCAmelCase ) 
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) _a : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase ,nn.Linear ) ) def __lowercase ( self : Dict ): _a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a : str = model_class(_UpperCAmelCase ) _a : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a : str = [*signature.parameters.keys()] _a : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] ,_UpperCAmelCase ) def __lowercase ( self : Any ): _a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def __lowercase ( self : List[str] ): _a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*_UpperCAmelCase ) def __lowercase ( self : Optional[int] ): _a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase ) def __lowercase ( self : str ): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue _a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() _a : Optional[Any] = True if model_class in get_values(_UpperCAmelCase ): continue _a : List[Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.train() _a : int = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase ) _a : str = model(**_UpperCAmelCase ).loss loss.backward() def __lowercase ( self : Optional[Any] ): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue _a , _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common() _a : Optional[int] = False _a : Optional[int] = True if model_class in 
get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing: continue _a : Any = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.gradient_checkpointing_enable() model.train() _a : Optional[Any] = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase ) _a : str = model(**_UpperCAmelCase ).loss loss.backward() def __lowercase ( self : Optional[Any] ): _a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() _a : List[str] = _config_zero_init(_UpperCAmelCase ) for model_class in self.all_model_classes: _a : Optional[Any] = model_class(config=_UpperCAmelCase ) # Skip the check for the backbone _a : Optional[Any] = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": _a : int = [F"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def __lowercase ( self : Tuple ): pass @slow def __lowercase ( self : Tuple ): for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: _a : int = DPTModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def __lowercase ( self : str ): # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type _a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common() _a : int = 'add' with self.assertRaises(_UpperCAmelCase ): _a : Dict = DPTForDepthEstimation(_UpperCAmelCase ) def __lowerCamelCase ( ) -> Tuple: _a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision @slow class __magic_name__ ( unittest.TestCase ): def __lowercase ( self : str ): _a : Optional[int] = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' ) _a : List[Any] = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(_UpperCAmelCase ) _a : Optional[int] = prepare_img() _a : str = image_processor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): _a : int = model(**_UpperCAmelCase ) _a : Union[str, Any] = outputs.predicted_depth # verify the predicted depth _a : List[str] = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape ,_UpperCAmelCase ) _a : Optional[Any] = torch.tensor( [[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 ,_UpperCAmelCase ,atol=1E-4 ) )
89
'''simple docstring''' import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> np.array: _a : Optional[int] = f"""{sampling_rate}""" _a : Any = '1' _a : Optional[int] = 'f32le' _a : Any = [ 'ffmpeg', '-i', 'pipe:0', '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] try: with subprocess.Popen(lowerCAmelCase_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: _a : int = ffmpeg_process.communicate(lowerCAmelCase_ ) except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error _a : int = output_stream[0] _a : List[str] = np.frombuffer(lowerCAmelCase_ , np.floataa ) if audio.shape[0] == 0: raise ValueError('Malformed soundfile' ) return audio def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = "f32le" , ) -> Union[str, Any]: _a : List[str] = f"""{sampling_rate}""" _a : List[str] = '1' if format_for_conversion == "s16le": _a : List[Any] = 2 elif format_for_conversion == "f32le": _a : Dict = 4 else: raise ValueError(f"""Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`""" ) _a : Any = platform.system() if system == "Linux": _a : Union[str, Any] = 'alsa' _a : Union[str, Any] = 'default' elif system == "Darwin": _a : Any = 'avfoundation' _a : Optional[int] = ':0' elif system == "Windows": _a : str = 'dshow' _a : Tuple = 'default' _a : str = [ 'ffmpeg', '-f', format_, '-i', input_, '-ac', ac, '-ar', ar, '-f', format_for_conversion, '-fflags', 'nobuffer', '-hide_banner', '-loglevel', 'quiet', 'pipe:1', ] _a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample _a : Union[str, Any] = _ffmpeg_stream(lowerCAmelCase_ , lowerCAmelCase_ ) for item in iterator: yield item def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "f32le" , ) -> str: if stream_chunk_s is not None: _a : str = stream_chunk_s else: _a : List[str] = chunk_length_s _a : int = ffmpeg_microphone(lowerCAmelCase_ , lowerCAmelCase_ , format_for_conversion=lowerCAmelCase_ ) if format_for_conversion == "s16le": _a : Optional[Any] = np.intaa _a : List[Any] = 2 elif format_for_conversion == "f32le": _a : Tuple = np.floataa _a : Any = 4 else: raise ValueError(f"""Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`""" ) if stride_length_s is None: _a : str = chunk_length_s / 6 _a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(lowerCAmelCase_ , (int, float) ): _a : List[str] = [stride_length_s, stride_length_s] _a : str = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample _a : List[str] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample _a : Any = datetime.datetime.now() _a : Dict = datetime.timedelta(seconds=lowerCAmelCase_ ) for item in chunk_bytes_iter(lowerCAmelCase_ , lowerCAmelCase_ , stride=(stride_left, stride_right) , stream=lowerCAmelCase_ ): # Put everything back in numpy scale _a : List[Any] = np.frombuffer(item['raw'] , dtype=lowerCAmelCase_ ) _a : List[str] = ( item['stride'][0] // size_of_sample, item['stride'][1] // size_of_sample, ) _a : Union[str, Any] = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False ) -> List[Any]: _a : Tuple = B'' _a , _a : str = stride if stride_left + stride_right >= chunk_len: raise ValueError( f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" ) _a : Optional[int] = 0 for raw in iterator: acc += raw if stream and len(lowerCAmelCase_ ) < chunk_len: _a : str = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(lowerCAmelCase_ ) >= chunk_len: # We are flushing the accumulator _a : Union[str, Any] = (_stride_left, stride_right) _a : Dict = {'raw': acc[:chunk_len], 'stride': stride} if stream: _a : List[str] = False yield item _a : int = stride_left _a : List[Any] = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(lowerCAmelCase_ ) > stride_left: _a : str = {'raw': acc, 'stride': (_stride_left, 0)} if stream: _a : str = False yield item def 
__lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: _a : Optional[Any] = 2**24 # 16Mo try: with subprocess.Popen(lowerCAmelCase_ , stdout=subprocess.PIPE , bufsize=lowerCAmelCase_ ) as ffmpeg_process: while True: _a : Any = ffmpeg_process.stdout.read(lowerCAmelCase_ ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
89
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = { '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''', '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''', '''junnyu/roformer_chinese_char_small''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json''' ), '''junnyu/roformer_chinese_char_base''': ( '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json''' ), '''junnyu/roformer_small_discriminator''': ( '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json''' ), '''junnyu/roformer_small_generator''': ( '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json''' ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : str = 'roformer' def __init__( self : Tuple ,_UpperCAmelCase : str=50000 ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[str]=768 ,_UpperCAmelCase : int=12 ,_UpperCAmelCase : int=12 ,_UpperCAmelCase : Any=3072 ,_UpperCAmelCase : Optional[Any]="gelu" ,_UpperCAmelCase : List[Any]=0.1 ,_UpperCAmelCase : List[Any]=0.1 ,_UpperCAmelCase : int=1536 ,_UpperCAmelCase : Union[str, Any]=2 ,_UpperCAmelCase : Dict=0.02 ,_UpperCAmelCase : Dict=1E-12 ,_UpperCAmelCase : int=0 ,_UpperCAmelCase : Dict=False ,_UpperCAmelCase : Any=True ,**_UpperCAmelCase : int ,): super().__init__(pad_token_id=_UpperCAmelCase ,**_UpperCAmelCase ) _a : List[Any] = vocab_size _a : Tuple = hidden_size if embedding_size is None else embedding_size _a : int = hidden_size _a : Union[str, Any] = num_hidden_layers _a : Any = 
num_attention_heads _a : Tuple = hidden_act _a : Optional[Any] = intermediate_size _a : str = hidden_dropout_prob _a : str = attention_probs_dropout_prob _a : int = max_position_embeddings _a : int = type_vocab_size _a : str = initializer_range _a : List[Any] = layer_norm_eps _a : Any = rotary_value _a : int = use_cache class __magic_name__ ( _UpperCamelCase ): @property def __lowercase ( self : Optional[Any] ): if self.task == "multiple-choice": _a : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _a : int = {0: 'batch', 1: 'sequence'} _a : List[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
89
'''simple docstring''' __lowerCAmelCase = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]: _a : List[Any] = set() # keep track of all the paths to be checked _a : Any = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _a : Tuple = queue.pop(0 ) # get the last node from the path _a : Tuple = path[-1] if node not in explored: _a : Optional[Any] = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _a : Any = list(lowerCAmelCase_ ) new_path.append(lowerCAmelCase_ ) queue.append(lowerCAmelCase_ ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(lowerCAmelCase_ ) # in case there's no path between the 2 nodes return [] def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int: if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _a : Optional[int] = [start] _a : Dict = set(lowerCAmelCase_ ) # Keep tab on distances from `start` node. _a : Dict = {start: 0, target: -1} while queue: _a : List[str] = queue.pop(0 ) if node == target: _a : Any = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(lowerCAmelCase_ ) queue.append(lowerCAmelCase_ ) _a : Any = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
89
1
"""Tests for the Trainer callback mechanism (event flow, add/remove/pop callbacks)."""
import shutil
import tempfile
import unittest
from unittest.mock import patch

from transformers import (
    DefaultFlowCallback,
    IntervalStrategy,
    PrinterCallback,
    ProgressCallback,
    Trainer,
    TrainerCallback,
    TrainingArguments,
    is_torch_available,
)
from transformers.testing_utils import require_torch


if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    """A callback that records every event it receives, in order, into ``self.events``."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        """Build a tiny regression Trainer suitable for exercising callbacks.

        disable_tqdm in TrainingArguments has a flaky default since it depends on the level
        of logging. We make sure it's set explicitly since the tests later on depend on its value.
        """
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        """Assert both callback lists are equal, mixing classes and instances; order doesn't matter."""
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        """Re-derive, from the trainer's args/state, the exact event sequence training should emit."""
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
89
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowerCAmelCase = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SwinForImageClassification''', '''SwinForMaskedImageModeling''', '''SwinModel''', '''SwinPreTrainedModel''', '''SwinBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ '''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFSwinForImageClassification''', '''TFSwinForMaskedImageModeling''', '''TFSwinModel''', '''TFSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
89
1
'''simple docstring''' from __future__ import annotations from random import choice def __lowerCamelCase ( lowerCAmelCase_ ) -> Tuple: return choice(lowerCAmelCase_ ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> int: _a : Tuple = random_pivot(lowerCAmelCase_ ) # partition based on pivot # linear time _a : Union[str, Any] = [e for e in lst if e < pivot] _a : Tuple = [e for e in lst if e > pivot] # if we get lucky, pivot might be the element we want. # we can easily see this: # small (elements smaller than k) # + pivot (kth element) # + big (elements larger than k) if len(lowerCAmelCase_ ) == k - 1: return pivot # pivot is in elements bigger than k elif len(lowerCAmelCase_ ) < k - 1: return kth_number(lowerCAmelCase_ , k - len(lowerCAmelCase_ ) - 1 ) # pivot is in elements smaller than k else: return kth_number(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
89
"""Tests for the Barthez (French BART) slow and fast tokenizers."""
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # Save the pretrained fast tokenizer in both legacy and new formats so the
        # mixin can reload either flavor from tmpdirname.
        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
89
1
"""Kandinsky 2.2 ControlNet text-to-image decoder pipeline."""
from typing import List, Optional, Union

import torch

from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> import numpy as np

        >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
        >>> from transformers import pipeline
        >>> from diffusers.utils import load_image


        >>> def make_hint(image, depth_estimator):
        ...     image = depth_estimator(image)["depth"]
        ...     image = np.array(image)
        ...     image = image[:, :, None]
        ...     image = np.concatenate([image, image, image], axis=2)
        ...     detected_map = torch.from_numpy(image).float() / 255.0
        ...     hint = detected_map.permute(2, 0, 1)
        ...     return hint


        >>> depth_estimator = pipeline("depth-estimation")

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        ... )
        >>> pipe_prior = pipe_prior.to("cuda")

        >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
        ...     "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")


        >>> img = load_image(
        ...     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        ...     "/kandinsky/cat.png"
        ... ).resize((768, 768))

        >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")

        >>> prompt = "A robot, 4k photo"
        >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"

        >>> generator = torch.Generator(device="cuda").manual_seed(43)

        >>> image_emb, zero_image_emb = pipe_prior(
        ...     prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
        ... ).to_tuple()

        >>> images = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     hint=hint,
        ...     num_inference_steps=50,
        ...     generator=generator,
        ...     height=768,
        ...     width=768,
        ... ).images

        >>> images[0].save("robot_cat.png")
        ```
"""


def downscale_height_and_width(height, width, scale_factor=8):
    """Round (height, width) up to the nearest multiple of ``scale_factor**2``-compatible
    latent size, then rescale back to pixel space. Guarantees the latent grid covers the image."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Decoder pipeline for Kandinsky 2.2 with ControlNet-style conditioning (``hint``).

    Args:
        unet: Conditional U-Net denoiser.
        scheduler: DDPM scheduler used for the reverse diffusion loop.
        movq: MoVQ VQ-VAE used to decode latents into images.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        # Spatial downscaling factor of the MoVQ encoder (2 per block transition).
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw (or validate user-supplied) initial latents and scale them for the scheduler."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload each submodel to CPU, moving it to GPU only while its forward runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models (coarser than sequential offload, faster at inference)."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the U-Net actually executes on (accounts for accelerate hooks)."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run the denoising loop and decode latents into images. See EXAMPLE_DOC_STRING."""
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            # Unconditional embeds come first; matches the later chunk(2) split order.
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                # U-Net predicts noise and variance stacked along channels; split them.
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
89
"""Tests that TRANSFORMERS_OFFLINE mode works by running subprocesses with sockets disabled."""
import subprocess
import sys

from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch


class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
89
1