import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """Copy/paste/tweak the fairseq model's weights into the Hugging Face BART structure."""
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)
    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
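# Example invocation (a sketch; the output directory name is illustrative):
#
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py bart.large.cnn \
#       ./bart-large-cnn-pt --hf_config facebook/bart-large-cnn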
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
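# Usage sketch for the torch branch of this package (the config values passed
# to UNet2DModel are illustrative, not defaults taken from this file):
#
#   from diffusers.models import UNet2DModel
#   unet = UNet2DModel(sample_size=64, in_channels=3, out_channels=3)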
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
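# Example invocation (a sketch; the repo path is illustrative, and the
# do_only_* flags at the top of the script select which conversion step runs):
#
#   python change_naming_configs_and_weights.py --repo_path ./ddpm-cifar10-32 --dump_path ./out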
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
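# A minimal usage sketch (facebook/m2m100_418M is the public checkpoint named
# in the maps above; the sample sentence is illustrative):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   model_inputs = tokenizer("Hello world", return_tensors="pt")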
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES

    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
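# A minimal usage sketch (the NLLB checkpoint above is downloaded on first use;
# languages are passed as the plain-English names that key LANGUAGE_CODES):
#
#   translator = TranslationTool()
#   translator("Bonjour tout le monde", src_lang="French", tgt_lang="English")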
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are changing the model, make sure the test uses the latest version of the tokenizer."""
        pass
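# The suite can be run with pytest (a sketch; the test file path follows the
# usual transformers layout and is illustrative):
#
#   pytest tests/models/layoutlm/test_tokenization_layoutlm.py -k test_full_tokenizer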
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether CP is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
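# Example invocation (the paths mirror the argparse defaults above; the script
# name is illustrative):
#
#   python run_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt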
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs


class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]

        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
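# A minimal usage sketch (`cfg` is a Detectron2-style config object carrying
# the INPUT/MODEL fields read above; the image path is illustrative):
#
#   preprocess = Preprocess(cfg)
#   images, sizes, scales_yx = preprocess("./demo.jpg", single_image=True)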
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """
    A number is a Krishnamurthy number if the sum of the
    factorials of its digits equals the number itself.
    """
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
    )
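# Worked example: 145 is a Krishnamurthy number because
# 1! + 4! + 5! = 1 + 24 + 120 = 145, while 25 is not (2! + 5! = 122 != 25).
#
#   assert krishnamurthy(145) is True
#   assert krishnamurthy(25) is False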
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
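# With the _LazyModule registration above, submodules are imported only on
# first attribute access, e.g. (a sketch):
#
#   from transformers.models.perceiver import PerceiverConfig  # triggers the lazy import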
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}


class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
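# A minimal usage sketch (the constructor arguments shown are the defaults
# listed above):
#
#   config = RobertaPreLayerNormConfig(vocab_size=50265, hidden_size=768)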
def logical_left_shift(number: int, shift_amount: int) -> str:
    """
    Return `number` logically left-shifted by `shift_amount` bits,
    as a binary string.

    >>> logical_left_shift(10, 3)
    '0b1010000'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """
    Return `number` logically right-shifted by `shift_amount` bits,
    as a binary string.

    >>> logical_right_shift(10, 3)
    '0b1'
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """
    Return `number` arithmetically right-shifted by `shift_amount` bits
    (the sign bit is replicated), as a binary string.

    >>> arithmetic_right_shift(10, 3)
    '0b00001'
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
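# A minimal usage sketch (shapes and the PRNG seed are illustrative; inputs
# are NHWC, as these modules expect):
#
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=32)
#   x = jnp.ones((1, 8, 8, 32))
#   temb = jnp.ones((1, 128))
#   params = block.init(jax.random.PRNGKey(0), x, temb)
#   y = block.apply(params, x, temb)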
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight] to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root of the component that a given node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates a new component index throughout a given component."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Finds the roots of the components of two nodes, compares the components
        by size, and attaches the smaller one to the larger one to form a single
        component."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        # Initialize additional lists required for the algorithm.
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    # Track the cheapest edge leaving each of the two components.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
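# A minimal usage sketch (the 4-node cycle and weights are illustrative; the
# resulting MST weight is 16):
#
#   g = Graph(4)
#   g.add_edge(0, 1, 10)
#   g.add_edge(1, 2, 5)
#   g.add_edge(2, 3, 4)
#   g.add_edge(0, 3, 7)
#   g.boruvka()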
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
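# A minimal usage sketch (facebook/blenderbot_small-90M is the public
# checkpoint named in the maps above):
#
#   tokenizer = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
#   ids = tokenizer("sample text")["input_ids"]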
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
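# A minimal usage sketch (the arguments shown are the defaults listed above):
#
#   config = XLMRobertaConfig(vocab_size=30522, hidden_size=768)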
from __future__ import annotations
def __A ( __lowerCamelCase , __lowerCamelCase = None ) -> list[list[str]]:
a = word_bank or []
# create a table
a = len(__lowerCamelCase ) + 1
a = []
for _ in range(__lowerCamelCase ):
table.append([] )
# seed value
a = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(__lowerCamelCase ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(__lowerCamelCase )] == word:
a = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(__lowerCamelCase )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(__lowerCamelCase )]:
combination.reverse()
return table[len(__lowerCamelCase )]
if __name__ == "__main__":
print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
print(
all_construct(
"hexagonosaurus",
["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
)
)
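# Expected output of the first call (checked by hand): the target "jwajalapa"
# decomposes in exactly two ways with the given word bank:
#   [['jwa', 'j', 'a', 'lapa'], ['j', 'w', 'a', 'j', 'a', 'lapa']]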
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''Salesforce/codegen-350M-mono''': 20_48,
}
class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" CodeGen tokenizer (backed by HuggingFace's *tokenizers* library), based on byte-level BPE.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currently GPT2's fast tokenizer does NOT support adding a BOS token. "
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005,"
                " so that the fast tokenizer works correctly."
            )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
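# Usage sketch (illustrative values): truncate a decoded completion at the
# first occurrence of one of the given patterns.
#   tokenizer = CodeGenTokenizerFast.from_pretrained("Salesforce/codegen-350M-mono")
#   ids = tokenizer("def add(a, b):\n    return a + b\n\nprint(add(1, 2))").input_ids
#   tokenizer.decode(ids, truncate_before_pattern=[r"\n\n^#", "^'''", "\n\n\n"])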
"""simple docstring"""
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
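# Usage sketch (shapes are illustrative): an adaptive softmax over a 10k-token
# vocabulary with two tail clusters; with labels it returns per-token NLLs of
# the shifted targets.
#   head = ProjectedAdaptiveLogSoftmax(n_token=10000, d_embed=64, d_proj=64, cutoffs=[1000, 5000])
#   hidden = torch.randn(2, 8, 64)            # (batch, seq_len, d_proj)
#   labels = torch.randint(0, 10000, (2, 8))
#   nll = head(hidden, labels)                # flattened negative log-likelihoods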
def get_bound(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return ``min_val`` when ``option`` is truthy, else ``max_val``.

    (The original helper's name was lost; this descriptive name is ours.)
    """
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer average of two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search for ``to_guess`` inside ``(lower, higher)`` and print the trace."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be (lower < higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
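# Worked example: guess_the_number(10, 1000, 17) halves the interval each
# round and prints the trace [505, 257, 133, 71, 40, 25, 17].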
def main() -> None:
    """Read the bounds and the target from stdin, then run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; each task prompt is repeated n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)


def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
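# Example (hand-checked): splitting on the EOF markers drops the trailing,
# usually-unfinished block of a generation.
#   remove_last_block("    return 1\ndef next_fn():")  ->  "    return 1"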
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple codes for each task in the dataset using multiple GPUs with accelerate."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens


def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
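# Illustrative launch (flag names mirror the HumanEvalArguments fields used
# above; exact defaults live in arguments.py):
#   accelerate launch human_eval.py --model_ckpt <checkpoint> \
#       --num_tasks 164 --n_samples 200 --batch_size 10 --HF_ALLOW_CODE_EVAL "1"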
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_count = count_parameters(hf_model.state_dict())
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_bridgetower""": [
"""BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BridgeTowerConfig""",
"""BridgeTowerTextConfig""",
"""BridgeTowerVisionConfig""",
],
"""processing_bridgetower""": ["""BridgeTowerProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_bridgetower"] = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"""BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BridgeTowerForContrastiveLearning""",
"""BridgeTowerForImageAndTextRetrieval""",
"""BridgeTowerForMaskedLM""",
"""BridgeTowerModel""",
"""BridgeTowerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
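# A minimal sketch of the lazy-import idea behind `_LazyModule` above
# (simplified illustration, not the actual transformers implementation):
#
#   import importlib
#   import types
#
#   class _MiniLazyModule(types.ModuleType):
#       """Defers importing submodules until one of their names is accessed."""
#
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           # map every exported name to the submodule that defines it
#           self._name_to_module = {
#               attr: mod for mod, attrs in import_structure.items() for attr in attrs
#           }
#
#       def __getattr__(self, attr):
#           # imported lazily, then the attribute is fetched from the submodule
#           module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
#           return getattr(module, attr)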
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
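# Usage sketch (illustrative; `ds_info` would be a `DatasetInfo` from `HfApi`):
#   fs = HfFileSystem(repo_info=ds_info, token=hf_token)
#   fs.ls("data")                         # names of files directly under data/
#   with fs.open("data/train.csv") as f:  # streams the file from the Hub
#       header = f.readline()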
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # Windows of len(qs) in ks, checking for all matches
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
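# Usage sketch (illustrative parameter tree): annotate a GPT-J-style parameter
# dict with PartitionSpecs; unmatched leaves would trip the assertion above.
#   params = {"transformer": {"wte": {"embedding": w}, "ln_f": {"bias": b, "scale": s}}}
#   specs = set_partitions(params)
#   # specs["transformer"]["wte"]["embedding"] == P("mp", None)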
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 128,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 50,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 10,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 10,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)


class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPT2Config()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [
            key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)
        ]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
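# `update_from_string` round-trip sketch (hypothetical values, mirroring the
# test above):
#   c = GPT2Config()
#   c.update_from_string("n_embd=769,scale_attn_weights=False")
#   assert c.n_embd == 769 and c.scale_attn_weights is False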
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to Transformers design.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Name of the BLIP-2 model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
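# Example invocation (script name and paths are illustrative):
#   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub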
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__a = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__a = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
__a = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__a = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Any = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""", _lowercase )
return [m.group(0 ) for m in matches]
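# For example, given the regex above, camel_case_split("TFBertModel") returns
# ["TF", "Bert", "Model"]: it splits on lower->upper transitions and on acronym
# boundaries such as "TFB..." -> "TF" + "Bert".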
def A_ ( ):
'''simple docstring'''
snake_case_ :int = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
snake_case_ :Dict = {
config.replace("""Config""", """""" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
snake_case_ :Optional[Any] = collections.defaultdict(_lowercase )
snake_case_ :int = collections.defaultdict(_lowercase )
snake_case_ :List[str] = collections.defaultdict(_lowercase )
# Let's loop through all transformers objects (once) and find out if models are supported by a given backend.
for attr_name in dir(_lowercase ):
snake_case_ :int = None
if _re_tf_models.match(_lowercase ) is not None:
snake_case_ :int = tf_models
snake_case_ :List[str] = _re_tf_models.match(_lowercase ).groups()[0]
elif _re_flax_models.match(_lowercase ) is not None:
snake_case_ :List[Any] = flax_models
snake_case_ :Any = _re_flax_models.match(_lowercase ).groups()[0]
elif _re_pt_models.match(_lowercase ) is not None:
snake_case_ :Optional[Any] = pt_models
snake_case_ :int = _re_pt_models.match(_lowercase ).groups()[0]
if lookup_dict is not None:
while len(_lowercase ) > 0:
if attr_name in model_prefix_to_model_type:
snake_case_ :Optional[int] = True
break
# Try again after removing the last word in the name
snake_case_ :Optional[Any] = """""".join(camel_case_split(_lowercase )[:-1] )
snake_case_ :Optional[int] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
snake_case_ :Optional[Any] = list(_lowercase )
all_models.sort()
snake_case_ :Optional[int] = {"""model_type""": all_models}
snake_case_ :Optional[int] = [pt_models[t] for t in all_models]
snake_case_ :Any = [tf_models[t] for t in all_models]
snake_case_ :Dict = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to attach a default processing class to each model type.
snake_case_ :Dict = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
snake_case_ :Optional[Any] = """AutoProcessor"""
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
snake_case_ :Tuple = """AutoTokenizer"""
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
snake_case_ :Tuple = """AutoFeatureExtractor"""
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
snake_case_ :str = """AutoTokenizer"""
snake_case_ :int = [processors[t] for t in all_models]
return pd.DataFrame(_lowercase )
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :List[Any] = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
snake_case_ :Optional[int] = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
snake_case_ :List[str] = [auto_class, f"""TF{auto_class}""", f"""Flax{auto_class}"""]
# Loop through all three frameworks
for module, cls, mapping in zip(_lowercase, _lowercase, _lowercase ):
# The type of pipeline may not exist in this framework
if not hasattr(_lowercase, _lowercase ):
continue
# First extract all model_names
snake_case_ :Tuple = []
for name in getattr(_lowercase, _lowercase ).values():
if isinstance(_lowercase, _lowercase ):
model_names.append(_lowercase )
else:
model_names.extend(list(_lowercase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[Any] = get_frameworks_table()
snake_case_ :str = Dataset.from_pandas(_lowercase )
snake_case_ :List[Any] = hf_hub_download(
"""huggingface/transformers-metadata""", """pipeline_tags.json""", repo_type="""dataset""", token=_lowercase )
snake_case_ :List[str] = Dataset.from_json(_lowercase )
snake_case_ :int = {
tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
for i in range(len(_lowercase ) )
}
snake_case_ :Optional[int] = update_pipeline_and_auto_class_table(_lowercase )
# Sort the model classes to avoid nondeterministic ordering creating spurious update commits.
snake_case_ :Tuple = sorted(table.keys() )
snake_case_ :Tuple = pd.DataFrame(
{
"""model_class""": model_classes,
"""pipeline_tag""": [table[m][0] for m in model_classes],
"""auto_class""": [table[m][1] for m in model_classes],
} )
snake_case_ :Union[str, Any] = Dataset.from_pandas(_lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(_lowercase, """frameworks.json""" ) )
tags_dataset.to_json(os.path.join(_lowercase, """pipeline_tags.json""" ) )
if commit_sha is not None:
snake_case_ :Union[str, Any] = (
f"""Update with commit {commit_sha}\n\nSee: """
f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
)
else:
snake_case_ :List[Any] = """Update"""
upload_folder(
repo_id="""huggingface/transformers-metadata""", folder_path=_lowercase, repo_type="""dataset""", token=_lowercase, commit_message=_lowercase, )
def A_ ( ):
'''simple docstring'''
snake_case_ :List[Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
snake_case_ :Dict = transformers_module.pipelines.SUPPORTED_TASKS
snake_case_ :List[str] = []
for key in pipeline_tasks:
if key not in in_table:
snake_case_ :int = pipeline_tasks[key]["""pt"""]
if isinstance(_lowercase, (list, tuple) ):
snake_case_ :Any = model[0]
snake_case_ :str = model.__name__
if model not in in_table.values():
missing.append(_lowercase )
if len(_lowercase ) > 0:
snake_case_ :Optional[int] = """, """.join(_lowercase )
raise ValueError(
"""The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__a = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
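# Typical invocations, matching the header comment above (the token and sha are placeholders):
#
#     python utils/update_metadata.py --token hf_xxx --commit_sha abc1234
#     python utils/update_metadata.py --check-only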
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
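# A quick sanity check for md5_me above, using two well-known MD5 test vectors
# (the first comes from the RFC 1321 test suite):
#
#     assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"
#     assert md5_me(b"The quick brown fox jumps over the lazy dog") == b"9e107d9d372bb6826bd81d3542a419d6"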
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase__ = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
lowerCAmelCase__ = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
lowerCAmelCase__ = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
lowerCAmelCase__ = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
lowerCAmelCase__ = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
lowerCAmelCase__ = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class a_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class a_ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
lowerCAmelCase__ = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
lowerCAmelCase__ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(SCREAMING_SNAKE_CASE )
class a_ :
'''simple docstring'''
def __call__( self : Optional[int] , lowercase__ : List[str] , lowercase__ : Optional[str] = None , lowercase__ : Optional[str] = None , lowercase__ : Union[bool, str] = False , lowercase__ : Union[bool, str] = False , lowercase__ : Optional[int] = None , lowercase__ : Optional[Union[str, TensorType]] = None , lowercase__ : Optional[bool] = None , **lowercase__ : Union[str, Any] , ):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , return_tensors=lowercase__ , return_attention_mask=lowercase__ , **lowercase__ , )
elif titles is None or texts is None:
lowerCAmelCase__ = titles if texts is None else texts
return super().__call__(
lowercase__ , lowercase__ , padding=lowercase__ , truncation=lowercase__ , max_length=lowercase__ , return_tensors=lowercase__ , return_attention_mask=lowercase__ , **lowercase__ , )
lowerCAmelCase__ = titles if not isinstance(lowercase__ , lowercase__) else [titles]
lowerCAmelCase__ = texts if not isinstance(lowercase__ , lowercase__) else [texts]
lowerCAmelCase__ = len(lowercase__)
lowerCAmelCase__ = questions if not isinstance(lowercase__ , lowercase__) else [questions] * n_passages
if len(lowercase__) != len(lowercase__):
raise ValueError(
F"""There should be as many titles than texts but got {len(lowercase__)} titles and {len(lowercase__)} texts.""")
lowerCAmelCase__ = super().__call__(lowercase__ , lowercase__ , padding=lowercase__ , truncation=lowercase__)['input_ids']
lowerCAmelCase__ = super().__call__(lowercase__ , add_special_tokens=lowercase__ , padding=lowercase__ , truncation=lowercase__)['input_ids']
lowerCAmelCase__ = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowercase__ , lowercase__)
]
}
if return_attention_mask is not False:
lowerCAmelCase__ = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
lowerCAmelCase__ = attention_mask
return self.pad(lowercase__ , padding=lowercase__ , max_length=lowercase__ , return_tensors=lowercase__)
def __snake_case ( self : Union[str, Any] , lowercase__ : BatchEncoding , lowercase__ : DPRReaderOutput , lowercase__ : int = 16 , lowercase__ : int = 64 , lowercase__ : int = 4 , ):
'''simple docstring'''
lowerCAmelCase__ = reader_input['input_ids']
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = reader_output[:3]
lowerCAmelCase__ = len(lowercase__)
lowerCAmelCase__ = sorted(range(lowercase__) , reverse=lowercase__ , key=relevance_logits.__getitem__)
lowerCAmelCase__ = []
for doc_id in sorted_docs:
lowerCAmelCase__ = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
lowerCAmelCase__ = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
lowerCAmelCase__ = sequence_ids.index(self.pad_token_id)
else:
lowerCAmelCase__ = len(lowercase__)
lowerCAmelCase__ = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase__ , top_spans=lowercase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase__ , start_index=lowercase__ , end_index=lowercase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(lowercase__) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __snake_case ( self : Optional[int] , lowercase__ : List[int] , lowercase__ : List[int] , lowercase__ : int , lowercase__ : int , ):
'''simple docstring'''
lowerCAmelCase__ = []
for start_index, start_score in enumerate(lowercase__):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
lowerCAmelCase__ = sorted(lowercase__ , key=lambda lowercase__: x[1] , reverse=lowercase__)
lowerCAmelCase__ = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""")
lowerCAmelCase__ = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""")
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(lowercase__) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(SCREAMING_SNAKE_CASE )
class a_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = READER_PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = READER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase_ = ['input_ids', 'attention_mask']
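# A minimal usage sketch of the reader tokenizer defined above, mirroring the documented
# transformers DPRReaderTokenizer example (the checkpoint name is a real one):
#
#     from transformers import DPRReaderTokenizer
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )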
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCAmelCase__ : Union[str, Any] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
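# With the _LazyModule pattern above, importing the package stays cheap: each submodule
# is only materialized on first attribute access. For example (assuming torch is installed):
#
#     from transformers.models.swiftformer import SwiftFormerConfig  # loads the configuration module only
#     from transformers.models.swiftformer import SwiftFormerModel   # triggers the torch-dependent import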
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = get_activation('''swish''' )
self.assertIsInstance(_a , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def __UpperCAmelCase ( self ):
__a = get_activation('''silu''' )
self.assertIsInstance(_a , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def __UpperCAmelCase ( self ):
__a = get_activation('''mish''' )
self.assertIsInstance(_a , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def __UpperCAmelCase ( self ):
__a = get_activation('''gelu''' )
self.assertIsInstance(_a , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
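# Outside of tests, get_activation simply maps a string name to a torch.nn module:
#
#     act = get_activation("gelu")      # returns an nn.GELU() instance
#     out = act(torch.randn(2, 4))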
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
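# A minimal usage sketch: instantiate the default (glpn-kitti style) configuration,
# or override individual architecture hyperparameters:
#
#     config = GLPNConfig()
#     config = GLPNConfig(depths=[3, 3, 6, 3], drop_path_rate=0.2)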
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
a_ : int = {
"configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
"tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : List[str] = [
"GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoXJapaneseForCausalLM",
"GPTNeoXJapaneseLayer",
"GPTNeoXJapaneseModel",
"GPTNeoXJapanesePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
a_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def harmonic_series(n_term: str) -> list:
    """simple docstring"""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
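# Example of the expected output:
#
#     harmonic_series("5")  ->  ['1', '1/2', '1/3', '1/4', '1/5']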
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=7 , __lowerCAmelCase=3 , __lowerCAmelCase=18 , __lowerCAmelCase=30 , __lowerCAmelCase=400 , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=None , ):
UpperCamelCase__ = size if size is not None else {"""shortest_edge""": 20}
UpperCamelCase__ = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = image_size
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = do_center_crop
UpperCamelCase__ = crop_size
def _lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Optional[Any] = MobileNetVaImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ):
UpperCamelCase__ = MobileNetVaImageProcessingTester(self )
@property
def _lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """crop_size""" ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCamelCase__ = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _lowerCamelCase ( self ):
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCamelCase__ = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _lowerCamelCase ( self ):
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
UpperCamelCase__ = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
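# A minimal usage sketch of the image processor exercised by this test; the real
# transformers class behind the mangled test name is MobileNetV1ImageProcessor:
#
#     from transformers import MobileNetV1ImageProcessor
#     processor = MobileNetV1ImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values  # image: PIL / np / torch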
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=7 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=99 , __lowerCAmelCase=64 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=16 , __lowerCAmelCase=2 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=4 , __lowerCAmelCase=None , ):
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = scope
UpperCamelCase__ = vocab_size - 1
def _lowerCamelCase ( self ):
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ = self.get_config()
return config, input_ids, input_mask, token_labels
def _lowerCamelCase ( self ):
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ = True
return config, input_ids, input_mask, token_labels
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = GPTNeoXModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
UpperCamelCase__ = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = True
UpperCamelCase__ = GPTNeoXModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = GPTNeoXForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = GPTNeoXForQuestionAnswering(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = GPTNeoXForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = GPTNeoXForTokenClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = True
UpperCamelCase__ = GPTNeoXForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
# first forward pass
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , use_cache=__lowerCAmelCase )
UpperCamelCase__ = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
UpperCamelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCamelCase__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
UpperCamelCase__ = output_from_no_past["""hidden_states"""][0]
UpperCamelCase__ = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , past_key_values=__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , )["""hidden_states"""][0]
# select random slice
UpperCamelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCamelCase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-3 ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _a , _a , _a , unittest.TestCase ):
snake_case : Optional[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case : Union[str, Any] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
snake_case : Dict = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case : Tuple = False
snake_case : Dict = False
snake_case : Tuple = False
snake_case : Any = False
def _lowerCamelCase ( self ):
UpperCamelCase__ = GPTNeoXModelTester(self )
UpperCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=64 , num_attention_heads=8 )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
# This regression test was failing with PyTorch < 1.3
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCamelCase__ = None
self.model_tester.create_and_check_model_as_decoder(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _lowerCamelCase ( self ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = ids_tensor([1, 10] , config.vocab_size )
UpperCamelCase__ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase__ = GPTNeoXModel(__lowerCAmelCase )
original_model.to(__lowerCAmelCase )
original_model.eval()
UpperCamelCase__ = original_model(__lowerCAmelCase ).last_hidden_state
UpperCamelCase__ = original_model(__lowerCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCamelCase__ = {"""type""": scaling_type, """factor""": 10.0}
UpperCamelCase__ = GPTNeoXModel(__lowerCAmelCase )
scaled_model.to(__lowerCAmelCase )
scaled_model.eval()
UpperCamelCase__ = scaled_model(__lowerCAmelCase ).last_hidden_state
UpperCamelCase__ = scaled_model(__lowerCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowerCAmelCase , __lowerCAmelCase , atol=1E-5 ) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ):
UpperCamelCase__ = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
UpperCamelCase__ = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__lowerCAmelCase )
UpperCamelCase__ = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__lowerCAmelCase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCamelCase__ = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
UpperCamelCase__ = model.generate(**__lowerCAmelCase , do_sample=__lowerCAmelCase , max_new_tokens=20 )
UpperCamelCase__ = tokenizer.batch_decode(__lowerCAmelCase )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
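# The RoPE-scaling test above builds configurations of the form
#
#     config.rope_scaling = {"type": "linear", "factor": 10.0}   # or {"type": "dynamic", "factor": 10.0}
#
# which stretch the rotary position embeddings to contexts longer than the
# trained max_position_embeddings.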
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {
'''configuration_bridgetower''': [
'''BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BridgeTowerConfig''',
'''BridgeTowerTextConfig''',
'''BridgeTowerVisionConfig''',
],
'''processing_bridgetower''': ['''BridgeTowerProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''BridgeTowerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BridgeTowerForContrastiveLearning''',
'''BridgeTowerForImageAndTextRetrieval''',
'''BridgeTowerForMaskedLM''',
'''BridgeTowerModel''',
'''BridgeTowerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
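# Example values for the three shifts above (the last uses 5-bit two's complement):
#
#     logical_left_shift(12, 2)      ->  "0b110000"
#     logical_right_shift(12, 2)     ->  "0b11"
#     arithmetic_right_shift(-8, 2)  ->  "0b11110"   # the sign bit is replicated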
_lowerCamelCase : int = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowerCamelCase : str = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowerCamelCase : Optional[Any] = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 351
|
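The mapping above supplies stand-in class names for placeholders in documentation snippets before they are executed as doc tests. A minimal sketch of that substitution step (illustrative only; the real harness lives in the doc-testing utilities):

doc_snippet = "processor = {processor_class}.from_pretrained('{model_class}')"
replacements = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
}
for placeholder, value in replacements.items():
    doc_snippet = doc_snippet.replace(placeholder, value)
print(doc_snippet)  # -> processor = FakeProcessorClass.from_pretrained('FakeModelClass')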
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_lowerCamelCase : str = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
'''simple docstring'''
_UpperCAmelCase : Any = "sequence-classification"
    def __init__(self, hparams):
        '''simple docstring'''
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
def A ( self : List[str] , **lowercase : Optional[Any] ):
'''simple docstring'''
return self.model(**lowercase )
def A ( self : Tuple , lowercase : Optional[Any] , lowercase : int ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case = outputs[0]
_snake_case = self.trainer.lr_schedulers[0]['scheduler']
_snake_case = {'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def A ( self : str ):
'''simple docstring'''
_snake_case = self.hparams
_snake_case = processors[args.task]()
_snake_case = processor.get_labels()
for mode in ["train", "dev"]:
_snake_case = self._feature_file(lowercase )
if os.path.exists(lowercase ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , lowercase )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_snake_case = (
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_snake_case = convert_examples_to_features(
lowercase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , lowercase )
torch.save(lowercase , lowercase )
def A ( self : List[str] , lowercase : str , lowercase : int , lowercase : bool = False ):
'''simple docstring'''
_snake_case = 'dev' if mode == 'test' else mode
_snake_case = self._feature_file(lowercase )
logger.info('Loading features from cached file %s' , lowercase )
_snake_case = torch.load(lowercase )
_snake_case = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_snake_case = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_snake_case = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(lowercase , lowercase , lowercase , lowercase ) , batch_size=lowercase , shuffle=lowercase , )
def A ( self : str , lowercase : Dict , lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = {'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_snake_case = batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_snake_case = self(**lowercase )
_snake_case , _snake_case = outputs[:2]
_snake_case = logits.detach().cpu().numpy()
_snake_case = inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def A ( self : Optional[Any] , lowercase : Optional[Any] ):
'''simple docstring'''
_snake_case = torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_snake_case = np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_snake_case = np.argmax(lowercase , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_snake_case = np.squeeze(lowercase )
_snake_case = np.concatenate([x['target'] for x in outputs] , axis=0 )
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = [[] for _ in range(out_label_ids.shape[0] )]
_snake_case = {**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , lowercase , lowercase )}
_snake_case = dict(results.items() )
_snake_case = results
return ret, preds_list, out_label_list
def A ( self : List[str] , lowercase : list ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def A ( self : Tuple , lowercase : Tuple ):
'''simple docstring'''
_snake_case , _snake_case , _snake_case = self._eval_end(lowercase )
_snake_case = ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        '''simple docstring'''
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            '--max_seq_length',
            default=128,
            type=int,
            help=(
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            ),
        )
        parser.add_argument('--task', default='', type=str, required=True, help='The GLUE task to run')
        parser.add_argument(
            '--gpus', default=0, type=int, help='The number of GPUs allocated for this, it is by default 0 meaning none'
        )
        parser.add_argument(
            '--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets'
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            './results',
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 130
| 0
|
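A typical invocation of the fine-tuning script above could look like the following sketch. The generic flags (`--model_name_or_path`, `--data_dir`, `--do_train`, `--do_predict`, ...) come from `add_generic_args` and `BaseTransformer` in `lightning_base`, which are not shown here, so treat the exact flag names as assumptions:

import sys

# Hypothetical command line; flag names defined in lightning_base are assumed.
sys.argv = [
    "run_pl_glue.py",
    "--task", "mrpc",
    "--data_dir", "./glue_data/MRPC",
    "--model_name_or_path", "bert-base-cased",
    "--output_dir", "./results/mrpc",
    "--max_seq_length", "128",
    "--gpus", "1",
    "--do_train",
]
main()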
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
a_ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
a_ = {'''facebook/blenderbot_small-90M''': 512}
def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        '''simple docstring'''
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
@property
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
return len(self.encoder )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , __UpperCAmelCase )-> str:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ = re.sub("([.,!?()])" , R" \1" , __UpperCAmelCase )
lowerCAmelCase__ = re.sub("(')" , R" \1 " , __UpperCAmelCase )
lowerCAmelCase__ = re.sub(R"\s{2,}" , " " , __UpperCAmelCase )
if "\n" in token:
lowerCAmelCase__ = token.replace("\n" , " __newln__" )
lowerCAmelCase__ = token.split(" " )
lowerCAmelCase__ = []
for token in tokens:
if not len(__UpperCAmelCase ):
continue
lowerCAmelCase__ = token.lower()
lowerCAmelCase__ = tuple(__UpperCAmelCase )
lowerCAmelCase__ = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
lowerCAmelCase__ = get_pairs(__UpperCAmelCase )
if not pairs:
words.append(__UpperCAmelCase )
continue
while True:
lowerCAmelCase__ = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ = bigram
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
while i < len(__UpperCAmelCase ):
try:
lowerCAmelCase__ = word.index(__UpperCAmelCase , __UpperCAmelCase )
new_word.extend(word[i:j] )
lowerCAmelCase__ = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ = tuple(__UpperCAmelCase )
lowerCAmelCase__ = new_word
if len(__UpperCAmelCase ) == 1:
break
else:
lowerCAmelCase__ = get_pairs(__UpperCAmelCase )
lowerCAmelCase__ = "@@ ".join(__UpperCAmelCase )
lowerCAmelCase__ = word[:-4]
lowerCAmelCase__ = word
words.append(__UpperCAmelCase )
return " ".join(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> List[str]:
'''simple docstring'''
lowerCAmelCase__ = []
lowerCAmelCase__ = re.findall(R"\S+\n?" , __UpperCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(" " ) ) )
return split_tokens
def UpperCAmelCase ( self , __UpperCAmelCase )-> int:
'''simple docstring'''
lowerCAmelCase__ = token.lower()
return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , __UpperCAmelCase )-> str:
'''simple docstring'''
return self.decoder.get(__UpperCAmelCase , self.unk_token )
def UpperCAmelCase ( self , __UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = " ".join(__UpperCAmelCase ).replace("@@ " , "" ).strip()
return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
| 340
|
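As a quick illustration of the `get_pairs` helper above, which drives the merge loop in `bpe`, the symbol pairs of a word are simply its adjacent bigrams:

word = ("l", "o", "w", "e", "r</w>")
print(get_pairs(word))
# -> {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r</w>')}  (a set, so order may vary)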
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
a_ =["""image_processor""", """char_tokenizer"""]
a_ ="""ViTImageProcessor"""
a_ ="""MgpstrTokenizer"""
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> str:
'''simple docstring'''
lowerCAmelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __UpperCAmelCase , )
lowerCAmelCase__ = kwargs.pop("feature_extractor" )
lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
lowerCAmelCase__ = tokenizer
lowerCAmelCase__ = AutoTokenizer.from_pretrained("gpt2" )
lowerCAmelCase__ = AutoTokenizer.from_pretrained("bert-base-uncased" )
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase )-> List[Any]:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
lowerCAmelCase__ = self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None:
lowerCAmelCase__ = self.char_tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCAmelCase__ = encodings["input_ids"]
return inputs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = sequences
lowerCAmelCase__ = char_preds.size(0 )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "char" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "bpe" )
lowerCAmelCase__ , lowerCAmelCase__ = self._decode_helper(__UpperCAmelCase , "wp" )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i in range(__UpperCAmelCase ):
lowerCAmelCase__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
lowerCAmelCase__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
lowerCAmelCase__ = scores.index(max(__UpperCAmelCase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
lowerCAmelCase__ = {}
lowerCAmelCase__ = final_strs
lowerCAmelCase__ = final_scores
lowerCAmelCase__ = char_strs
lowerCAmelCase__ = bpe_strs
lowerCAmelCase__ = wp_strs
return out
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
if format == DecodeType.CHARACTER:
lowerCAmelCase__ = self.char_decode
lowerCAmelCase__ = 1
lowerCAmelCase__ = "[s]"
elif format == DecodeType.BPE:
lowerCAmelCase__ = self.bpe_decode
lowerCAmelCase__ = 2
lowerCAmelCase__ = "#"
elif format == DecodeType.WORDPIECE:
lowerCAmelCase__ = self.wp_decode
lowerCAmelCase__ = 102
lowerCAmelCase__ = "[SEP]"
else:
raise ValueError(F"Format {format} is not supported." )
lowerCAmelCase__ , lowerCAmelCase__ = [], []
lowerCAmelCase__ = pred_logits.size(0 )
lowerCAmelCase__ = pred_logits.size(1 )
lowerCAmelCase__ , lowerCAmelCase__ = pred_logits.topk(1 , dim=-1 , largest=__UpperCAmelCase , sorted=__UpperCAmelCase )
lowerCAmelCase__ = preds_index.view(-1 , __UpperCAmelCase )[:, 1:]
lowerCAmelCase__ = decoder(__UpperCAmelCase )
lowerCAmelCase__ , lowerCAmelCase__ = torch.nn.functional.softmax(__UpperCAmelCase , dim=2 ).max(dim=2 )
lowerCAmelCase__ = preds_max_prob[:, 1:]
for index in range(__UpperCAmelCase ):
lowerCAmelCase__ = preds_str[index].find(__UpperCAmelCase )
lowerCAmelCase__ = preds_str[index][:pred_eos]
lowerCAmelCase__ = preds_index[index].cpu().tolist()
lowerCAmelCase__ = pred_index.index(__UpperCAmelCase ) if eos_token in pred_index else -1
lowerCAmelCase__ = preds_max_prob[index][: pred_eos_index + 1]
lowerCAmelCase__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__UpperCAmelCase )
conf_scores.append(__UpperCAmelCase )
return dec_strs, conf_scores
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[Any]:
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(__UpperCAmelCase )]
return decode_strs
| 340
| 1
|
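`batch_decode` above keeps, per example, whichever decoding head (char, BPE, or wordpiece) produced the highest cumulative confidence. Stripped of tensors, the selection reduces to this pattern (values are hypothetical, for illustration only):

scores = [0.91, 0.87, 0.95]             # hypothetical char / bpe / wp confidences
strs = ["ticket", "tic#ket", "ticket"]  # hypothetical decoded strings
best = scores.index(max(scores))
print(strs[best], scores[best])  # -> ticket 0.95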
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    '''simple docstring'''
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    '''simple docstring'''
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print('''Sorted order is:''', ''' '''.join(str(x) for x in a))
if __name__ == "__main__":
main()
| 219
|
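Since `pigeonhole_sort` above mutates its argument in place, using it looks like this (negative values are fine because hole indices are offset by the minimum):

data = [5, -1, 3, 3, 0]
pigeonhole_sort(data)
print(data)  # -> [-1, 0, 3, 3, 5]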
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('''--make-reports''')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 219
| 1
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22Pipeline(DiffusionPipeline):
'''simple docstring'''
def __init__( self , A , A , A , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
unet=A , scheduler=A , movq=A , )
_SCREAMING_SNAKE_CASE = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def snake_case_( self , A , A , A , A , A , A ) -> Union[str, Any]:
if latents is None:
_SCREAMING_SNAKE_CASE = randn_tensor(A , generator=A , device=A , dtype=A )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_SCREAMING_SNAKE_CASE = latents.to(A )
_SCREAMING_SNAKE_CASE = latents * scheduler.init_noise_sigma
return latents
def snake_case_( self , A=0 ) -> Dict:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_SCREAMING_SNAKE_CASE = torch.device(f'cuda:{gpu_id}' )
_SCREAMING_SNAKE_CASE = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(A , A )
def snake_case_( self , A=0 ) -> str:
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_SCREAMING_SNAKE_CASE = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=A )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_SCREAMING_SNAKE_CASE = None
for cpu_offloaded_model in [self.unet, self.movq]:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = cpu_offload_with_hook(A , A , prev_module_hook=A )
# We'll offload the last model manually.
_SCREAMING_SNAKE_CASE = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case_( self ) -> Tuple:
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(A , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self , A , A , A = 512 , A = 512 , A = 100 , A = 4.0 , A = 1 , A = None , A = None , A = "pil" , A = True , ) -> List[str]:
_SCREAMING_SNAKE_CASE = self._execution_device
_SCREAMING_SNAKE_CASE = guidance_scale > 1.0
if isinstance(A , A ):
_SCREAMING_SNAKE_CASE = torch.cat(A , dim=0 )
_SCREAMING_SNAKE_CASE = image_embeds.shape[0] * num_images_per_prompt
if isinstance(A , A ):
_SCREAMING_SNAKE_CASE = torch.cat(A , dim=0 )
if do_classifier_free_guidance:
_SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(A , dim=0 )
_SCREAMING_SNAKE_CASE = negative_image_embeds.repeat_interleave(A , dim=0 )
_SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=A )
self.scheduler.set_timesteps(A , device=A )
_SCREAMING_SNAKE_CASE = self.scheduler.timesteps
_SCREAMING_SNAKE_CASE = self.unet.config.in_channels
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = downscale_height_and_width(A , A , self.movq_scale_factor )
# create initial latent
_SCREAMING_SNAKE_CASE = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , A , A , A , self.scheduler , )
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
_SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_SCREAMING_SNAKE_CASE = {"""image_embeds""": image_embeds}
_SCREAMING_SNAKE_CASE = self.unet(
sample=A , timestep=A , encoder_hidden_states=A , added_cond_kwargs=A , return_dict=A , )[0]
if do_classifier_free_guidance:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = variance_pred.chunk(2 )
_SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_SCREAMING_SNAKE_CASE = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_SCREAMING_SNAKE_CASE = self.scheduler.step(
A , A , A , generator=A , )[0]
# post-processing
_SCREAMING_SNAKE_CASE = self.movq.decode(A , force_not_quantize=A )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
_SCREAMING_SNAKE_CASE = image * 0.5 + 0.5
_SCREAMING_SNAKE_CASE = image.clamp(0 , 1 )
_SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_SCREAMING_SNAKE_CASE = self.numpy_to_pil(A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A )
| 58
|
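The `downscale_height_and_width` helper at the top of the pipeline rounds requested image dimensions up to a multiple of `scale_factor**2`; with the default movq scale factor of 8:

# 768 is already a multiple of 8**2 = 64, so it maps cleanly:
assert downscale_height_and_width(768, 768, 8) == (96, 96)
# 500 is not, so it is rounded up to the next multiple of 64:
assert downscale_height_and_width(500, 500, 8) == (64, 64)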
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
                if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""", layer_index)
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "weight" in name:
                        weight_type = """weight"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, """w""", encoding="""utf-8""" ) as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="""|""", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=1_6000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = HubertForCTC(config)
    else:
        hf_wavavec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 58
| 1
|
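The MAPPING table in the conversion script uses `*` as a layer-index wildcard; the renaming logic in `recursively_load_weights` fills it in from the fairseq parameter name, roughly like this standalone sketch:

mapped_key = "encoder.layers.*.attention.k_proj"
name = "encoder.layers.7.self_attn.k_proj.weight"
key = "self_attn.k_proj"
layer_index = name.split(key)[0].split(".")[-2]  # -> "7"
print(mapped_key.replace("*", layer_index))
# -> encoder.layers.7.attention.k_proj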
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'vocab_file': 'spiece.model'}
lowerCamelCase_ = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
lowerCamelCase_ = {
'albert-base-v1': 5_12,
'albert-large-v1': 5_12,
'albert-xlarge-v1': 5_12,
'albert-xxlarge-v1': 5_12,
'albert-base-v2': 5_12,
'albert-large-v2': 5_12,
'albert-xlarge-v2': 5_12,
'albert-xxlarge-v2': 5_12,
}
lowerCamelCase_ = '▁'
class AlbertTokenizer(PreTrainedTokenizer):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : str , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Any=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : Any="[CLS]" , __lowerCamelCase : Optional[int]="[SEP]" , __lowerCamelCase : List[str]="<unk>" , __lowerCamelCase : str="[SEP]" , __lowerCamelCase : Union[str, Any]="<pad>" , __lowerCamelCase : Dict="[CLS]" , __lowerCamelCase : Any="[MASK]" , __lowerCamelCase : Optional[Dict[str, Any]] = None , **__lowerCamelCase : Dict , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase , normalized=__lowerCamelCase )
if isinstance(__lowerCamelCase , __lowerCamelCase )
else mask_token
)
_SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__lowerCamelCase , remove_space=__lowerCamelCase , keep_accents=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
_SCREAMING_SNAKE_CASE = do_lower_case
_SCREAMING_SNAKE_CASE = remove_space
_SCREAMING_SNAKE_CASE = keep_accents
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__lowerCamelCase )
@property
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
return len(self.sp_model )
def lowerCAmelCase_ ( self : Tuple ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.__dict__.copy()
_SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : List[str] , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCamelCase : List[Any] ):
"""simple docstring"""
if self.remove_space:
_SCREAMING_SNAKE_CASE = " ".join(inputs.strip().split() )
else:
_SCREAMING_SNAKE_CASE = inputs
_SCREAMING_SNAKE_CASE = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
_SCREAMING_SNAKE_CASE = unicodedata.normalize("NFKD" , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = "".join([c for c in outputs if not unicodedata.combining(__lowerCamelCase )] )
if self.do_lower_case:
_SCREAMING_SNAKE_CASE = outputs.lower()
return outputs
def lowerCAmelCase_ ( self : int , __lowerCamelCase : str ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.preprocess_text(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
_SCREAMING_SNAKE_CASE = []
for piece in pieces:
if len(__lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
_SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(piece[:-1].replace(__lowerCamelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_SCREAMING_SNAKE_CASE = cur_pieces[1:]
else:
_SCREAMING_SNAKE_CASE = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__lowerCamelCase )
else:
new_pieces.append(__lowerCamelCase )
return new_pieces
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return self.sp_model.PieceToId(__lowerCamelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCamelCase : Tuple ):
"""simple docstring"""
return self.sp_model.IdToPiece(__lowerCamelCase )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = ""
_SCREAMING_SNAKE_CASE = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCamelCase ) + token
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = False
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
def lowerCAmelCase_ ( self : Optional[int] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase_ ( self : Any , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None , __lowerCamelCase : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1]
def lowerCAmelCase_ ( self : int , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase_ ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_SCREAMING_SNAKE_CASE = os.path.join(
__lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , "wb" ) as fi:
_SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 356
|
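`build_inputs_with_special_tokens` above simply brackets the id sequences with `[CLS]`/`[SEP]`; with illustrative ids (the real ones come from the SentencePiece vocab), the pattern is:

cls, sep = [2], [3]  # hypothetical special-token ids
token_ids_a = [10, 11, 12]
token_ids_b = [20, 21]
print(cls + token_ids_a + sep)                      # -> [2, 10, 11, 12, 3]
print(cls + token_ids_a + sep + token_ids_b + sep)  # -> [2, 10, 11, 12, 3, 20, 21, 3]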
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 111
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_data2vec_audio"""] = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
    _import_structure["""modeling_data2vec_text"""] = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
    _import_structure["""modeling_data2vec_vision"""] = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
    _import_structure["""modeling_tf_data2vec_vision"""] = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 169
|
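The init above gates its torch-only symbols behind `is_torch_available()` while the `TYPE_CHECKING` branch keeps static analyzers happy. A simplified sketch of how such an availability check can be combined with `TYPE_CHECKING` (the real helper in `transformers.utils` also checks versions and environment flags):

import importlib.util
from typing import TYPE_CHECKING


def is_torch_available() -> bool:
    # Simplified: only checks that the package can be found at all.
    return importlib.util.find_spec("torch") is not None


if TYPE_CHECKING:
    import torch  # seen by type checkers regardless of the runtime check
elif is_torch_available():
    import torch  # imported at runtime only when actually installed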
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __lowerCAmelCase ( unittest.TestCase):
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : Any =tempfile.mkdtemp()
# fmt: off
a__ : List[Any] =["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
a__ : str =dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
a__ : List[Any] =["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
a__ : Optional[int] ={"unk_token": "<unk>"}
a__ : Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a__ : Tuple =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
a__ : Optional[Any] ={
"do_resize": True,
"size": 2_0,
"do_center_crop": True,
"crop_size": 1_8,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
a__ : Dict =os.path.join(self.tmpdirname , lowerCAmelCase__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self , **lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _lowercase ( self , **lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _lowercase ( self , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : Optional[Any] =[np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
a__ : List[Any] =[Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : Union[str, Any] =self.get_tokenizer()
a__ : int =self.get_rust_tokenizer()
a__ : List[str] =self.get_image_processor()
a__ : Dict =CLIPSegProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
a__ : Optional[Any] =CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__ )
a__ : Tuple =CLIPSegProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
a__ : Dict =CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__ )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ : List[str] =CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a__ : str =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
a__ : int =self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 )
a__ : Optional[Any] =CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : str =self.get_image_processor()
a__ : Optional[int] =self.get_tokenizer()
a__ : Dict =CLIPSegProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
a__ : str =self.prepare_image_inputs()
a__ : Any =image_processor(lowerCAmelCase__ , return_tensors="np" )
a__ : Optional[int] =processor(images=lowerCAmelCase__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ : Optional[int] =self.get_image_processor()
a__ : List[Any] =self.get_tokenizer()
a__ : Optional[int] =CLIPSegProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
a__ : Union[str, Any] ="lower newer"
a__ : List[str] =processor(text=lowerCAmelCase__ )
a__ : str =tokenizer(lowerCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ : Any =self.get_image_processor()
a__ : Dict =self.get_tokenizer()
a__ : Union[str, Any] =CLIPSegProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
a__ : Dict ="lower newer"
a__ : int =self.prepare_image_inputs()
a__ : Any =processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def _lowercase ( self ) -> str:
'''simple docstring'''
a__ : Union[str, Any] =self.get_image_processor()
a__ : Optional[Any] =self.get_tokenizer()
a__ : str =CLIPSegProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
a__ : int =self.prepare_image_inputs()
a__ : Union[str, Any] =self.prepare_image_inputs()
a__ : Tuple =processor(images=lowerCAmelCase__ , visual_prompt=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] =self.get_image_processor()
a__ : Any =self.get_tokenizer()
a__ : Tuple =CLIPSegProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
a__ : Dict =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ : Optional[Any] =processor.batch_decode(lowerCAmelCase__ )
a__ : Dict =tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 95
| 0
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; the weight is optional and repeated edges are
    # ignored
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []
    def all_nodes(self):
        return list(self.graph)

    # handles removal of an edge that may not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
    # depth-first search; with d == -1 the full traversal order is returned
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    # c is the number of nodes to create; with c == -1 a random count in
    # [10, 10009] is used
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has at most 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])
    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
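
# Hand-checked usage sketch (added; not part of the original module):
#
#     g = DirectedGraph()
#     g.add_pair(1, 2)
#     g.add_pair(2, 3)
#     g.add_pair(1, 3)
#     g.dfs(1, 3)            # -> [1, 2, 3]: a DFS path ending at node 3
#     g.bfs(1)               # -> [1, 2, 3]
#     g.topological_sort(1)  # -> [3, 2, 1]: nodes in finishing order, i.e. the
#                            #    reverse of a conventional topological order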
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; the weight is optional and repeated edges are
    # ignored
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there is already an edge, do nothing
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the edge the other way as well (undirected graph)
        if self.graph.get(v):
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)
    # depth-first search; with d == -1 the full traversal order is returned
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    # c is the number of nodes to create; with c == -1 a random count in
    # [10, 10009] is used
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has at most 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
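
# Hand-checked usage sketch for the undirected class (added; not part of the
# original module):
#
#     g = Graph()
#     g.add_pair(1, 2)       # stores both 1 -> 2 and 2 -> 1
#     g.add_pair(2, 3)
#     g.degree(2)            # -> 2
#     g.add_pair(3, 1)       # closes a triangle ...
#     g.has_cycle()          # ... so this should now return True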
# ---
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
# ---
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
# ---
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
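
# Illustration (added): with the _LazyModule pattern above, a statement such as
#
#     from transformers.models.transfo_xl import TransfoXLConfig
#
# only pays the import cost of `configuration_transfo_xl`; the heavy PyTorch
# and TensorFlow modeling modules listed in `_import_structure` are imported
# lazily, on first attribute access.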
# ---
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    # 1. rewrite layers_{x} into the HF block/{x}/layer layout
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    # convert a Google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
        args.config_name,
        args.gin_file,
        args.pytorch_dump_folder_path,
        args.num_experts,
    )
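
# Example invocation (added; all paths are placeholders and the script name is
# illustrative -- use whatever this file is saved as):
#
#     python convert_switch_transformers_checkpoint.py \
#         --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --gin_file /path/to/operative_config.gin \
#         --pytorch_dump_folder_path ./switch-base-8 \
#         --num_experts 8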
# ---
"""simple docstring"""
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune the walk in place: skip scripts and hidden/underscore dirs
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
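
# Illustrative output (added; hypothetical repository layout): for the files
# maths/prime_check.py and maths/series/p_series.py the script prints
# GitHub-flavoured markdown along the lines of
#
#     ## Maths
#       * [Prime Check](maths/prime_check.py)
#       * Series
#         * [P Series](maths/series/p_series.py)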
# ---
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of the input list, sorted ascending."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # the maximum count in the input list
    # gets all values whose count equals the maximum
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
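
# Hand-checked examples (added for illustration):
#
#     >>> mode([2, 2, 3, 3, 4])
#     [2, 3]
#     >>> mode([1, 1, 2])
#     [1]
#     >>> mode([])
#     []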
# ---
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
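
# Example invocation (added; the TPU name and zone are placeholders):
#
#     accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#         --command "pip install -U accelerate" \
#         --command "accelerate launch train.py" \
#         --debug
#
# With --debug the assembled gcloud command is printed instead of executed.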
# ---
import math
def is_prime(number: int) -> bool:
    """Check whether ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # all primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Grow a number spiral layer by layer and return the side length at which the
    share of primes on the diagonals first drops below ``ratio``.
    """
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        # add the corners of the next layer (side length j + 2)
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
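
# Sanity note (added): this mirrors the Project Euler 58 "spiral primes" setup;
# each loop pass checks the prime ratio over the 2 * j - 1 diagonal values.
# Hand-checked example: solution(0.5) returns 11, the first side length where
# the diagonal prime ratio drops below one half.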
# ---
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
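
# Minimal usage sketch (added; the label names are illustrative):
#
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification()
#     task = task.align_with_features(features)
#     task.column_mapping  # -> {"image": "image", "labels": "labels"}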
# ---
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
lowerCamelCase__ = ["""small""", """medium""", """large"""]
lowerCamelCase__ = """lm_head.decoder.weight"""
lowerCamelCase__ = """lm_head.weight"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[int] = torch.load(__lowerCamelCase )
__lowerCAmelCase : Any = d.pop(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--dialogpt_path""", default=""".""", type=str)
lowerCamelCase__ = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
lowerCamelCase__ = os.path.join(args.dialogpt_path, f'{MODEL}_ft.pkl')
lowerCamelCase__ = f'./DialoGPT-{MODEL}'
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
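
# Example (added; assumes small_ft.pkl, medium_ft.pkl and large_ft.pkl sit in
# the current directory; the script name is illustrative):
#
#     python convert_dialogpt_checkpoint.py --dialogpt_path .
#
# This writes ./DialoGPT-{small,medium,large}/pytorch_model.bin with the
# lm_head weight key renamed.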
# ---
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
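
# Minimal usage sketch (added; the repo id and file name are placeholders):
#
#     model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-repo", file_name="model.onnx")
#     outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))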
# ---
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
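
# Minimal usage sketch (added; shapes are illustrative):
#
#     crit = ProjectedAdaptiveLogSoftmax(
#         n_token=10000, d_embed=512, d_proj=512, cutoffs=[1000, 5000], div_val=1
#     )
#     hidden = torch.randn(4, 16, 512)           # (batch, seq, d_proj)
#     labels = torch.randint(0, 10000, (4, 16))  # next-token targets
#     nll = crit(hidden, labels)                 # per-position negative log-likelihoods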
# ---
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        self.img = cv2.imread(input_file, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
_UpperCamelCase = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
_UpperCamelCase = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
# ---
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class lowerCamelCase (A__ ,A__ ,unittest.TestCase ):
lowerCamelCase__ : List[Any] = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase__ : Union[str, Any] = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
test_head_masking = False
test_onnx = False
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
self.model_tester = TFEsmModelTester(self )
self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
def SCREAMING_SNAKE_CASE ( self : Any ) -> int:
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
model = TFEsmModel.from_pretrained(model_name )
self.assertIsNotNone(model )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
pass
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
name = model.get_bias()
assert isinstance(name , dict )
for k, v in name.items():
assert isinstance(v , tf.Variable )
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
@require_tf
class lowerCamelCase (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
model = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
output = model(input_ids )[0]
expected_shape = [1, 6, 3_3]
self.assertEqual(list(output.numpy().shape ) , expected_shape )
# compare the actual values for a slice.
expected_slice = tf.constant(
[
[
[8.921_518, -1_0.5_8_9_8_1_4, -6.4_671_307],
[-6.3_967_156, -1_3.9_1_1_3_7_7, -1.1_211_915],
[-7.781_247, -1_3.9_5_1_5_5_7, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
model = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
input_ids = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
output = model(input_ids )[0]
# compare the actual values for a slice.
expected_slice = tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 165
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class __a (lowerCamelCase ):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
slow_tokenizer_class = RetriBertTokenizer
model_input_names = ["input_ids", "attention_mask"]
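# The constructor below mirrors BertTokenizerFast: when the serialized
# normalizer state disagrees with the requested lowercasing / accent-stripping /
# Chinese-character options, the backend normalizer is rebuilt to match.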
def __init__( self : str , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
"""simple docstring"""
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , do_lower_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' , strip_accents ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , tokenize_chinese_chars ) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers , normalizer_state.pop('''type''' ) )
normalizer_state['''lowercase'''] = do_lower_case
normalizer_state['''strip_accents'''] = strip_accents
normalizer_state['''handle_chinese_chars'''] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
self.do_lower_case = do_lower_case
def UpperCAmelCase__ ( self : int , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]]=None ) -> List[int]:
"""simple docstring"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self : Optional[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
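# e.g. for a pair "[CLS] A [SEP] B [SEP]" this yields 0s over "[CLS] A [SEP]"
# and 1s over "B [SEP]".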
def UpperCAmelCase__ ( self : Optional[Any] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
| 125
| 0
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A_ ( unittest.TestCase ):
def setUp(self :Optional[int] )-> None:
self.checkpoint = '''ylacombe/bark-small'''
self.tmpdirname = tempfile.mkdtemp()
self.voice_preset = '''en_speaker_1'''
self.input_string = '''This is a test string'''
self.speaker_embeddings_dict_path = '''speaker_embeddings_path.json'''
self.speaker_embeddings_directory = '''speaker_embeddings'''
def get_tokenizer(self :Optional[int] , **kwargs :Tuple )-> Optional[int]:
return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
def tearDown(self :Union[str, Any] )-> None:
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase (self :Optional[Any] )-> Optional[Any]:
tokenizer = self.get_tokenizer()
processor = BarkProcessor(tokenizer=tokenizer )
processor.save_pretrained(self.tmpdirname )
processor = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def _lowerCAmelCase (self :str )-> List[str]:
processor = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
processor = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def _lowerCAmelCase (self :Optional[Any] )-> Any:
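# Build a dummy voice preset with the per-prompt array shapes the processor expects.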
processor = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
seq_len = 35
nb_codebooks_coarse = 2
nb_codebooks_total = 8
voice_preset = {
'''semantic_prompt''': np.ones(seq_len ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
inputs = processor(text=self.input_string , voice_preset=voice_preset )
processed_voice_preset = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
# test loading voice preset from npz file
file_path = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(file_path , **voice_preset )
inputs = processor(text=self.input_string , voice_preset=file_path )
processed_voice_preset = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
# test loading voice preset from the hub
inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
def _lowerCAmelCase (self :Tuple )-> Optional[int]:
tokenizer = self.get_tokenizer()
processor = BarkProcessor(tokenizer=tokenizer )
encoded_processor = processor(text=self.input_string )
encoded_tok = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 250
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
_import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_layoutlmv2'] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
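# Nothing below is imported eagerly at runtime; _LazyModule resolves each
# attribute on first access, so heavy backends (tokenizers, vision, torch)
# are only pulled in when the corresponding objects are actually used.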
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
snake_case__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 250
| 1
|
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowercase__ : Optional[int] = 2
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , *, # begin keyword-only arguments
bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ) -> None:
'''simple docstring'''
self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
self.symbols = []
self.count = []
self.indices = {}
self.bos_index = self.add_symbol(bos )
self.pad_index = self.add_symbol(pad )
self.eos_index = self.add_symbol(eos )
self.unk_index = self.add_symbol(unk )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(s )
self.nspecial = len(self.symbols )
def __eq__( self : int , other : Optional[Any] ) -> bool:
'''simple docstring'''
return self.indices == other.indices
def __getitem__( self : Union[str, Any] , idx : int ) -> str:
'''simple docstring'''
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self : str ) -> Any:
'''simple docstring'''
return len(self.symbols )
def __contains__( self : int , sym : str ) -> bool:
'''simple docstring'''
return sym in self.indices
@classmethod
def load( cls : str , f : Optional[Any] ) -> "Dictionary":
'''simple docstring'''
d = cls()
d.add_from_file(f )
return d
def add_symbol( self : Optional[int] , word : str , n : int=1 , overwrite : bool=False ) -> int:
'''simple docstring'''
if word in self.indices and not overwrite:
idx = self.indices[word]
self.count[idx] = self.count[idx] + n
return idx
else:
idx = len(self.symbols )
self.indices[word] = idx
self.symbols.append(word )
self.count.append(n )
return idx
def _load_meta( self : int , lines : List[Any] ) -> int:
'''simple docstring'''
return 0
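# Each payload line of a fairseq dict.txt is "<token> <count>", optionally
# suffixed with "#fairseq:overwrite" to allow redefining an earlier entry.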
def add_from_file( self : Union[str, Any] , f : Any ) -> None:
'''simple docstring'''
if isinstance(f , str ):
try:
with open(f , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(fd )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(f ) )
return
lines = f.readlines()
indices_start_line = self._load_meta(lines )
for line in lines[indices_start_line:]:
try:
line , field = line.rstrip().rsplit(''' ''' , 1 )
if field == "#fairseq:overwrite":
overwrite = True
line , field = line.rsplit(''' ''' , 1 )
else:
overwrite = False
count = int(field )
word = line
if word in self and not overwrite:
raise RuntimeError(
'''Duplicate word found when loading Dictionary: \'{}\'. '''
'''Duplicate words can overwrite earlier ones by adding the '''
'''#fairseq:overwrite flag at the end of the corresponding row '''
'''in the dictionary file. If using the Camembert model, please '''
'''download an updated copy of the model file.'''.format(word ) )
self.add_symbol(word , n=count , overwrite=overwrite )
except ValueError:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' )
def rewrite_dict_keys( d : dict ) -> dict:
"""simple docstring"""
da = dict((re.sub(r'''@@$''', '''''', k ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''', '''</w>''', k ), v) for k, v in d.items() )
keep_keys = '''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
da[k] = d[k] # restore
return da
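# Example: {"wor@@": 4, "ld": 7, "<unk>": 0} -> {"wor": 4, "ld</w>": 7, "<unk>": 0}
# inner BPE pieces lose their "@@" continuation marker, word-final pieces gain
# "</w>", and the special tokens are restored to their bare form.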
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path : Optional[int], pytorch_dump_folder_path : List[Any] ) -> None:
"""simple docstring"""
if not os.path.exists(snake_case__ ):
raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" )
os.makedirs(snake_case__, exist_ok=snake_case__ )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
_UpperCamelCase = os.path.join(snake_case__, '''checkpoint.pt''' )
if not os.path.isfile(snake_case__ ):
raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" )
_UpperCamelCase = torch.load(snake_case__, map_location='''cpu''' )
_UpperCamelCase = chkpt['''cfg''']['''model''']
# dicts
_UpperCamelCase = os.path.join(snake_case__, '''dict.txt''' )
if not os.path.isfile(snake_case__ ):
raise ValueError(F"""path to the file {dict_file} does not exist!""" )
_UpperCamelCase = Dictionary.load(snake_case__ )
_UpperCamelCase = rewrite_dict_keys(src_dict.indices )
_UpperCamelCase = len(snake_case__ )
_UpperCamelCase = os.path.join(snake_case__, VOCAB_FILES_NAMES['''vocab_file'''] )
print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" )
with open(snake_case__, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(snake_case__, ensure_ascii=snake_case__, indent=snake_case__ ) )
# merges_file (bpecodes)
_UpperCamelCase = os.path.join(snake_case__, '''bpecodes''' )
if not os.path.isfile(snake_case__ ):
raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" )
_UpperCamelCase = os.path.join(snake_case__, VOCAB_FILES_NAMES['''merges_file'''] )
shutil.copyfile(snake_case__, snake_case__ )
# model config
_UpperCamelCase = os.path.join(snake_case__, '''config.json''' )
_UpperCamelCase = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.0_2,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1e-12,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
# good hparam defaults to start with
print(F"""Generating {biogpt_model_config_file}""" )
with open(snake_case__, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(snake_case__, ensure_ascii=snake_case__, indent=snake_case__ ) )
# tokenizer config
_UpperCamelCase = os.path.join(snake_case__, snake_case__ )
_UpperCamelCase = {
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
'''model_max_length''': 1024,
'''pad_token''': '''<pad>''',
'''special_tokens_map_file''': None,
'''tokenizer_class''': '''BioGptTokenizer''',
'''unk_token''': '''<unk>''',
}
print(F"""Generating {biogpt_tokenizer_config_file}""" )
with open(snake_case__, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(snake_case__, ensure_ascii=snake_case__, indent=snake_case__ ) )
# model
_UpperCamelCase = chkpt['''model''']
# remove unneeded keys
_UpperCamelCase = [
'''decoder.version''',
]
for k in ignore_keys:
model_state_dict.pop(snake_case__, snake_case__ )
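# HF checkpoints nest the decoder weights under the "biogpt." prefix; only the
# LM-head output projection keeps its top-level name.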
layer_names = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('''output_projection.weight''' ):
model_state_dict['''output_projection.weight'''] = model_state_dict.pop(layer_name )
else:
model_state_dict['''biogpt.''' + layer_name] = model_state_dict.pop(layer_name )
_UpperCamelCase = BioGptConfig.from_pretrained(snake_case__ )
_UpperCamelCase = BioGptForCausalLM(snake_case__ )
# check that it loads ok
model_new.load_state_dict(snake_case__ )
# save
_UpperCamelCase = os.path.join(snake_case__, snake_case__ )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(snake_case__, snake_case__ )
print('''Conversion is done!''' )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase__ : Tuple = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 324
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _lowerCAmelCase ( self , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = TextaTextGenerationPipeline(model=_a , tokenizer=_a )
return generator, ["Something to write", "Something else"]
def _lowerCAmelCase ( self , _a , _a ):
"""simple docstring"""
outputs = generator("""Something there""" )
self.assertEqual(_a , [{"""generated_text""": ANY(_a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
lowerCamelCase = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
lowerCamelCase = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
with self.assertRaises(_a ):
generator(4 )
@require_torch
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
lowerCamelCase = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
lowerCamelCase = 3
lowerCamelCase = generator(
"""Something there""" , num_return_sequences=_a , num_beams=_a , )
lowerCamelCase = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a , _a )
lowerCamelCase = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a )
self.assertEqual(
_a , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
lowerCamelCase = generator.model.config.eos_token_id
lowerCamelCase = """<pad>"""
lowerCamelCase = generator(
["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , )
self.assertEqual(
_a , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
lowerCamelCase = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
| 291
| 0
|
"""simple docstring"""
from collections import defaultdict
from math import gcd
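# Project Euler 75: count wire lengths (perimeters) up to `limit` that can be
# bent into exactly one integer-sided right triangle. Primitive triples come
# from Euclid's formula with coprime m > n of opposite parity:
#   a = m**2 - n**2, b = 2*m*n, c = m**2 + n**2, perimeter = 2*m*(m + n).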
def solution( limit : int = 1_50_00_00 ) -> int:
"""simple docstring"""
frequencies = defaultdict(int )
euclid_m = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2 ):
if gcd(euclid_m, euclid_n ) > 1:
continue
primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 100
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class _UpperCAmelCase( lowerCamelCase ):
model_type = 'xlm-roberta-xl'
def __init__( self , vocab_size=25_08_80 , hidden_size=25_60 , num_hidden_layers=36 , num_attention_heads=32 , intermediate_size=1_02_40 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_14 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1e-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> Tuple:
'''simple docstring'''
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
class _UpperCAmelCase( lowerCamelCase ):
@property
def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task == "multiple-choice":
dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
| 100
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
snake_case__ : List[Any] = logging.get_logger(__name__)
def _snake_case ( _snake_case : Tuple ):
if isinstance(_snake_case , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_snake_case , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_snake_case ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
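# make_batched above normalizes its input to a batch of videos: a list of
# lists of frames passes through, a single video becomes a one-element batch,
# and a single image becomes a one-frame, one-video batch.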
class snake_case_( a__ ):
model_input_names = ['''pixel_values''']
def __init__( self : Optional[int] , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_5_5 , offset : bool = True , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs : Tuple , ):
super().__init__(**kwargs )
size = size if size is not None else {'''shortest_edge''': 2_5_6}
size = get_size_dict(size , default_to_square=False )
crop_size = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
crop_size = get_size_dict(crop_size , param_name='''crop_size''' )
self.do_resize = do_resize
self.size = size
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.offset = offset
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[Any] , ):
lowerCAmelCase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
if "shortest_edge" in size:
lowerCAmelCase : List[str] = get_resize_output_image_size(UpperCamelCase_ , size['''shortest_edge'''] , default_to_square=UpperCamelCase_ )
elif "height" in size and "width" in size:
lowerCAmelCase : Any = (size['''height'''], size['''width'''])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Union[str, Any] , ):
lowerCAmelCase : Tuple = get_size_dict(UpperCamelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Optional[Any] , ):
lowerCAmelCase : List[str] = image.astype(np.floataa )
if offset:
lowerCAmelCase : Union[str, Any] = image - (scale / 2)
return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : str , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Any , ):
return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
lowerCAmelCase : List[str] = to_numpy_array(UpperCamelCase_ )
if do_resize:
lowerCAmelCase : Optional[int] = self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ )
if do_center_crop:
lowerCAmelCase : List[str] = self.center_crop(UpperCamelCase_ , size=UpperCamelCase_ )
if do_rescale:
lowerCAmelCase : str = self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ , offset=UpperCamelCase_ )
if do_normalize:
lowerCAmelCase : Optional[int] = self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ )
lowerCAmelCase : str = to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ )
return image
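# _preprocess_image handles a single frame; preprocess below maps it over
# every frame of every video and wraps the result in a BatchFeature.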
def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : ImageInput , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : float = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_ : List[str] , ):
lowerCAmelCase : str = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase : Any = resample if resample is not None else self.resample
lowerCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase : str = offset if offset is not None else self.offset
lowerCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase : Dict = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase : Any = image_std if image_std is not None else self.image_std
lowerCAmelCase : List[str] = size if size is not None else self.size
lowerCAmelCase : Tuple = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ )
lowerCAmelCase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase : Any = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' )
if not valid_images(UpperCamelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowerCAmelCase : List[str] = make_batched(UpperCamelCase_ )
lowerCAmelCase : Dict = [
[
self._preprocess_image(
image=UpperCamelCase_ , do_resize=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , do_center_crop=UpperCamelCase_ , crop_size=UpperCamelCase_ , do_rescale=UpperCamelCase_ , rescale_factor=UpperCamelCase_ , offset=UpperCamelCase_ , do_normalize=UpperCamelCase_ , image_mean=UpperCamelCase_ , image_std=UpperCamelCase_ , data_format=UpperCamelCase_ , )
for img in video
]
for video in videos
]
lowerCAmelCase : Optional[Any] = {'''pixel_values''': videos}
return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
| 60
|
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def _UpperCamelCase ( UpperCamelCase__ ):
return x + 2
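# Minimal "tool" exposed to the evaluated snippets in the tests below.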
class _snake_case ( unittest.TestCase ):
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = """x = 3"""
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : List[str] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
assert result == 3
self.assertDictEqual(_lowerCamelCase , {"""x""": 3})
UpperCAmelCase__ : Optional[int] = """x = y"""
UpperCAmelCase__ : Optional[Any] = {"""y""": 5}
UpperCAmelCase__ : Dict = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 5, """y""": 5})
def snake_case__ ( self):
UpperCAmelCase__ : Any = """y = add_two(x)"""
UpperCAmelCase__ : Optional[Any] = {"""x""": 3}
UpperCAmelCase__ : Tuple = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 5})
# Won't work without the tool
with CaptureStdout() as out:
UpperCAmelCase__ : List[str] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
assert result is None
assert "tried to execute add_two" in out.out
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = """x = 3"""
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : Optional[int] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
assert result == 3
self.assertDictEqual(_lowerCamelCase , {"""x""": 3})
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = """test_dict = {'x': x, 'y': add_two(x)}"""
UpperCAmelCase__ : Any = {"""x""": 3}
UpperCAmelCase__ : List[str] = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 5})
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}})
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = """x = 3\ny = 5"""
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : List[Any] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 5})
def snake_case__ ( self):
UpperCAmelCase__ : Dict = """text = f'This is x: {x}.'"""
UpperCAmelCase__ : str = {"""x""": 3}
UpperCAmelCase__ : Optional[Any] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """text""": """This is x: 3."""})
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = """if x <= 3:\n y = 2\nelse:\n y = 5"""
UpperCAmelCase__ : Optional[Any] = {"""x""": 3}
UpperCAmelCase__ : Optional[int] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 2})
UpperCAmelCase__ : Optional[int] = {"""x""": 8}
UpperCAmelCase__ : int = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 8, """y""": 5})
def snake_case__ ( self):
UpperCAmelCase__ : Union[str, Any] = """test_list = [x, add_two(x)]"""
UpperCAmelCase__ : int = {"""x""": 3}
UpperCAmelCase__ : Tuple = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
self.assertListEqual(_lowerCamelCase , [3, 5])
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_list""": [3, 5]})
def snake_case__ ( self):
UpperCAmelCase__ : Tuple = """y = x"""
UpperCAmelCase__ : Optional[Any] = {"""x""": 3}
UpperCAmelCase__ : Optional[int] = evaluate(_lowerCamelCase , {} , state=_lowerCamelCase)
assert result == 3
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """y""": 3})
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = """test_list = [x, add_two(x)]\ntest_list[1]"""
UpperCAmelCase__ : Union[str, Any] = {"""x""": 3}
UpperCAmelCase__ : int = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_list""": [3, 5]})
UpperCAmelCase__ : List[str] = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"""
UpperCAmelCase__ : Any = {"""x""": 3}
UpperCAmelCase__ : Dict = evaluate(_lowerCamelCase , {"""add_two""": add_two} , state=_lowerCamelCase)
assert result == 5
self.assertDictEqual(_lowerCamelCase , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}})
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = """x = 0\nfor i in range(3):\n x = i"""
UpperCAmelCase__ : str = {}
UpperCAmelCase__ : Tuple = evaluate(_lowerCamelCase , {"""range""": range} , state=_lowerCamelCase)
assert result == 2
self.assertDictEqual(_lowerCamelCase , {"""x""": 2, """i""": 2})
| 163
| 0
|
"""simple docstring"""
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 53
|
"""simple docstring"""
def sylvester( number : int ) -> int:
'''simple docstring'''
assert isinstance(number , int ), f'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
msg = f'''The input value of [n={number}] has to be > 0'''
raise ValueError(msg )
else:
num = sylvester(number - 1 )
lower = num - 1
upper = num
return lower * upper + 1
if __name__ == "__main__":
print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 53
| 1
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory( args ):
'''simple docstring'''
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ : ArgumentParser ) -> Optional[int]:
'''simple docstring'''
download_parser = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" ,type=lowerCamelCase__ ,default=lowerCamelCase__ ,help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" ,action="""store_true""" ,help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" ,action="""store_true""" ,help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" ,)
download_parser.add_argument("""model""" ,type=lowerCamelCase__ ,help="""Name of the model to download""" )
download_parser.set_defaults(func=download_command_factory )
def __init__( self : Union[str, Any] ,model : str ,cache : str ,force : bool ,trust_remote_code : bool ) -> None:
'''simple docstring'''
self._model = model
self._cache = cache
self._force = force
self._trust_remote_code = trust_remote_code
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
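# Example invocation (assuming the usual transformers-cli entry point that
# registers this command):
#   transformers-cli download --cache-dir ./models bert-base-uncased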
| 296
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = ["""LlamaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_llama"""] = [
"""LlamaForCausalLM""",
"""LlamaModel""",
"""LlamaPreTrainedModel""",
"""LlamaForSequenceClassification""",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 296
| 1
|
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _lowerCAmelCase ( lowercase ,unittest.TestCase ):
"""simple docstring"""
tokenizer_class = WavaVecaPhonemeCTCTokenizer
test_rust_tokenizer = False
def setUp( self : List[str] ):
super().setUp()
vocab = (
"<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
"ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
"ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
"oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
"pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
"yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
"əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
"ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
"ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
"uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
"ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
"ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
"ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
).split(" " )
vocab_tokens = dict(zip(vocab, range(len(vocab ) ) ) )
self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(vocab_tokens ) + "\n" )
def get_clean_sequence( self : List[Any], tokenizer : str, with_prefix_space : Dict=False, max_length : Dict=2_0, min_length : List[Any]=5 ):
__lowercase = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=UpperCAmelCase__ )) for i in range(len(UpperCAmelCase__ ) )]
__lowercase = list(filter(lambda UpperCAmelCase__ : [t[0]] == tokenizer.encode(t[1], do_phonemize=UpperCAmelCase__ ), UpperCAmelCase__ ) )
if max_length is not None and len(UpperCAmelCase__ ) > max_length:
__lowercase = toks[:max_length]
if min_length is not None and len(UpperCAmelCase__ ) < min_length and len(UpperCAmelCase__ ) > 0:
while len(UpperCAmelCase__ ) < min_length:
__lowercase = toks + toks
# toks_str = [t[1] for t in toks]
__lowercase = [t[0] for t in toks]
# Ensure consistency
__lowercase = tokenizer.decode(UpperCAmelCase__, clean_up_tokenization_spaces=UpperCAmelCase__ )
if " " not in output_txt and len(UpperCAmelCase__ ) > 1:
__lowercase = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=UpperCAmelCase__ )
+ " "
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=UpperCAmelCase__ )
)
if with_prefix_space:
__lowercase = " " + output_txt
__lowercase = tokenizer.encode(UpperCAmelCase__, add_special_tokens=UpperCAmelCase__ )
return output_txt, output_ids
def get_tokenizer( self : Tuple, **kwargs : List[str] ):
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs )
def _lowercase ( self : Optional[int] ):
__lowercase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
# check adding a single token
tokenizer.add_tokens("xxx" )
__lowercase = tokenizer("m xxx ɪ", do_phonemize=UpperCAmelCase__ ).input_ids
self.assertEqual(UpperCAmelCase__, [1_3, 3_9_2, 1_7] ) # xxx should be last token
tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
__lowercase = tokenizer("m aaa ɪ ccc", do_phonemize=UpperCAmelCase__ ).input_ids
self.assertEqual(UpperCAmelCase__, [1_3, 3_9_3, 1_7, 3_9_5] ) # aaa and ccc should be after xxx and 2 after aaa
__lowercase = tokenizer("maɪ c", do_phonemize=UpperCAmelCase__ ).input_ids
self.assertEqual(UpperCAmelCase__, [3, 2_0_0] ) # mai should be <unk> (=3)
def _lowercase ( self : List[str] ):
__lowercase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__lowercase = "Hello how are you"
__lowercase = tokenizer.phonemize(UpperCAmelCase__, phonemizer_lang="en-us" )
self.assertEqual(UpperCAmelCase__, "h ə l oʊ h aʊ ɑːɹ j uː" )
def _lowercase ( self : List[Any] ):
__lowercase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__lowercase = "Hello how are you"
__lowercase = tokenizer.phonemize(UpperCAmelCase__, phonemizer_lang="en-us" )
self.assertEqual(tokenizer(UpperCAmelCase__ ).input_ids, tokenizer(UpperCAmelCase__, do_phonemize=UpperCAmelCase__ ).input_ids )
def _lowercase ( self : int ):
__lowercase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__lowercase = "Hello how are you"
__lowercase = tokenizer.phonemize(UpperCAmelCase__, phonemizer_lang="en-us" )
__lowercase = tokenizer.decode(tokenizer(UpperCAmelCase__ ).input_ids )
self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any] ):
__lowercase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__lowercase = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7],
]
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__, batch_tokens[0] )
self.assertEqual(UpperCAmelCase__, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
def _lowercase ( self : int ):
__lowercase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" )
tokenizer.add_tokens("|" )
__lowercase = "Hello how are you"
__lowercase = tokenizer.phonemize(UpperCAmelCase__, phonemizer_lang="en-us" )
self.assertEqual(UpperCAmelCase__, "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
def _lowercase ( self : Any ):
__lowercase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" )
tokenizer.add_tokens("|" )
__lowercase = "Hello how are you"
__lowercase = tokenizer.phonemize(UpperCAmelCase__, phonemizer_lang="en-us" )
self.assertEqual(tokenizer(UpperCAmelCase__ ).input_ids, tokenizer(UpperCAmelCase__, do_phonemize=UpperCAmelCase__ ).input_ids )
def _lowercase ( self : Optional[int] ):
__lowercase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
__lowercase = [
[1_1, 5, 1_5, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 1_5, 8, tokenizer.word_delimiter_token_id, 9_8],
[tokenizer.word_delimiter_token_id, 2_4, 2_2, tokenizer.word_delimiter_token_id, 5, 2_4, 2_2, 5, 7_7],
]
# fmt: on
# decode with word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0] )
__lowercase = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__, batch_tokens[0] )
self.assertEqual(UpperCAmelCase__, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
# decode with no word_del_token filter
__lowercase = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=UpperCAmelCase__ )
__lowercase = tokenizer.batch_decode(UpperCAmelCase__, filter_word_delimiter_token=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__, batch_tokens[0] )
self.assertEqual(UpperCAmelCase__, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
def _lowercase ( self : List[Any] ):
__lowercase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" )
tokenizer.add_tokens("|" )
__lowercase = "Hello how are you"
__lowercase = tokenizer.phonemize(UpperCAmelCase__, phonemizer_lang="en-us" )
__lowercase = tokenizer.decode(tokenizer(UpperCAmelCase__ ).input_ids, filter_word_delimiter_token=UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ )
def _lowercase ( self : Any ):
__lowercase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|" )
tokenizer.add_tokens("|" )
__lowercase = "Hello how are you"
__lowercase = tokenizer.phonemize(UpperCAmelCase__, phonemizer_lang="en-us" )
__lowercase = tokenizer.decode(tokenizer(UpperCAmelCase__ ).input_ids, filter_word_delimiter_token=UpperCAmelCase__ )
self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip(), UpperCAmelCase__ )
def _lowercase ( self : Tuple ):
__lowercase = self.tokenizer_class.from_pretrained(
"facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=UpperCAmelCase__ )
__lowercase = "Hello how are you"
__lowercase = tokenizer(UpperCAmelCase__, phonemizer_lang="en-us" ).input_ids
__lowercase = tokenizer(UpperCAmelCase__, phonemizer_lang="fr-fr" ).input_ids
self.assertNotEqual(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = tokenizer.decode(UpperCAmelCase__ )
__lowercase = tokenizer.decode(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__, "h ə l oʊ h aʊ ɑːɹ j uː" )
self.assertEqual(UpperCAmelCase__, "ɛ l o h aʊ a ʁ j u" )
def _lowercase ( self : Optional[Any] ):
__lowercase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
__lowercase = "Hello how Are you"
__lowercase = "hello how are you"
__lowercase = tokenizer(UpperCAmelCase__ ).input_ids
__lowercase = tokenizer(UpperCAmelCase__ ).input_ids
self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ )
def _lowercase ( self : Dict ):
__lowercase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
tokenizer.add_tokens(["!", "?"] )
tokenizer.add_special_tokens({"cls_token": "$$$"} )
# fmt: off
__lowercase = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8, 3_9_2, 3_9_2, 3_9_3, 3_9_2, 3_9_2, 3_9_3, 3_9_4, 3_9_4],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7, tokenizer.pad_token_id, 3_9_4, 3_9_4],
]
# fmt: on
__lowercase = tokenizer.batch_decode(UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
@staticmethod
def _lowercase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : str ):
__lowercase = [d[key] for d in offsets]
return retrieved_list
def _lowercase ( self : Optional[int] ):
__lowercase = self.get_tokenizer(word_delimiter_token="|" )
tokenizer.add_tokens("|" )
# fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ
__lowercase = [1_1, 5, 5, 5, 1_5, 1_5, tokenizer.pad_token_id, 1_5, 1_5, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 1_5, 8, 8, 8, tokenizer.word_delimiter_token_id, 9_8]
# fmt: on
__lowercase = tokenizer.decode(UpperCAmelCase__, output_char_offsets=UpperCAmelCase__, filter_word_delimiter_token=UpperCAmelCase__ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ), 2 )
self.assertTrue("text" in outputs )
self.assertTrue("char_offsets" in outputs )
self.assertTrue(isinstance(UpperCAmelCase__, UpperCAmelCase__ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char" ) ), outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"], "char" ), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"], "start_offset" ), [0, 1, 4, 7, 9, 1_1, 1_2, 1_5, 1_6] )
self.assertListEqual(
self.get_from_offsets(outputs["char_offsets"], "end_offset" ), [1, 4, 6, 9, 1_0, 1_2, 1_5, 1_6, 1_7] )
def _lowercase ( self : int ):
__lowercase = self.get_tokenizer(word_delimiter_token="|" )
def check_list_tuples_equal(UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[int] ):
self.assertTrue(isinstance(UpperCAmelCase__, UpperCAmelCase__ ) )
self.assertTrue(isinstance(outputs_list[0], UpperCAmelCase__ ) )
# transform list to ModelOutput
__lowercase = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["text"], outputs_batch_a["text"] )
def recursive_check(UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : List[Any] ):
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
[recursive_check(UpperCAmelCase__, UpperCAmelCase__ ) for la, la in zip(UpperCAmelCase__, UpperCAmelCase__ )]
self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["char_offsets"], outputs_batch_a["char_offsets"] )
# fmt: off
__lowercase = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 4, 8, 9_8, 3_2, 3_2, 3_2, 3_2, 4, 3_3, tokenizer.word_delimiter_token_id, 3_2, 3_2, 3_3, 3_4, 3_4],
[2_4, 2_2, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 2_4, 2_2, 2_2, 2_2, 4, 5, 7_7, tokenizer.pad_token_id, 2_2, 2_2, 4, 3_4, 3_4, 3_4, 3_4],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
__lowercase = tokenizer.batch_decode(UpperCAmelCase__, output_char_offsets=UpperCAmelCase__ )
__lowercase = [tokenizer.decode(UpperCAmelCase__, output_char_offsets=UpperCAmelCase__ ) for ids in sample_ids]
check_list_tuples_equal(UpperCAmelCase__, UpperCAmelCase__ )
@unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
def _lowercase ( self : Any ):
pass
@unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
def _lowercase ( self : Union[str, Any] ):
pass
@unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
def _lowercase ( self : Union[str, Any] ):
pass
@unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
def _lowercase ( self : Tuple ):
pass
def _lowercase ( self : Any ):
__lowercase = self.get_tokenizers(do_lower_case=UpperCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__lowercase = tokenizer.vocab_size
__lowercase = len(UpperCAmelCase__ )
self.assertNotEqual(UpperCAmelCase__, 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowercase = ["aaaaa bbbbbb", "cccccccccdddddddd"]
__lowercase = tokenizer.add_tokens(UpperCAmelCase__ )
__lowercase = tokenizer.vocab_size
__lowercase = len(UpperCAmelCase__ )
self.assertNotEqual(UpperCAmelCase__, 0 )
self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__, len(UpperCAmelCase__ ) )
self.assertEqual(UpperCAmelCase__, all_size + len(UpperCAmelCase__ ) )
__lowercase = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=UpperCAmelCase__ )
self.assertGreaterEqual(len(UpperCAmelCase__ ), 4 )
self.assertGreater(tokens[0], tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3], tokenizer.vocab_size - 1 )
__lowercase = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
__lowercase = tokenizer.add_special_tokens(UpperCAmelCase__ )
__lowercase = tokenizer.vocab_size
__lowercase = len(UpperCAmelCase__ )
self.assertNotEqual(UpperCAmelCase__, 0 )
self.assertEqual(UpperCAmelCase__, UpperCAmelCase__ )
self.assertEqual(UpperCAmelCase__, len(UpperCAmelCase__ ) )
self.assertEqual(UpperCAmelCase__, all_size_a + len(UpperCAmelCase__ ) )
__lowercase = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=UpperCAmelCase__ )
self.assertGreaterEqual(len(UpperCAmelCase__ ), 6 )
self.assertGreater(tokens[0], tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0], tokens[1] )
self.assertGreater(tokens[-3], tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3], tokens[-4] )
self.assertEqual(tokens[0], tokenizer.eos_token_id )
self.assertEqual(tokens[-3], tokenizer.pad_token_id )
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def _lowercase ( self : str ):
pass
@unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
def _lowercase ( self : List[str] ):
pass
def _lowercase ( self : Union[str, Any] ):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
__lowercase = self.get_tokenizers(fast=UpperCAmelCase__, do_lower_case=UpperCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__lowercase = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
__lowercase = tokenizer.convert_tokens_to_string(UpperCAmelCase__ )
self.assertIsInstance(output["text"], UpperCAmelCase__ )
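# --- Illustration (not part of the test suite above) ---
# A minimal sketch of the CTC-style grouping that the char-offset assertions
# above rely on: repeated ids are collapsed into one token, pad ids are
# dropped, and each surviving token keeps the [start, end) range it covered
# in the raw id stream. The name `ctc_collapse` and the toy ids are
# hypothetical; this is not the tokenizer's actual implementation.
from itertools import groupby

def ctc_collapse(ids, pad_id):
    offsets = []
    index = 0
    for token_id, group in groupby(ids):
        run = len(list(group))
        if token_id != pad_id:
            offsets.append({"id": token_id, "start_offset": index, "end_offset": index + run})
        index += run  # pad runs still advance the offset, they are just not emitted
    return offsets

# e.g. ids [11, 5, 5, 5, 15, 15, 0, 15] with pad_id=0 yield entries for
# 11 (0-1), 5 (1-4), 15 (4-6) and a *second* 15 (7-8); the pad run 6-7 is skipped,
# which is exactly why the test above expects two separate 15 entries around a pad.
assert [o["start_offset"] for o in ctc_collapse([11, 5, 5, 5, 15, 15, 0, 15], 0)] == [0, 1, 4, 7]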
| 144
|
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_a = 'bert-base-cased'
_a = 'google/pegasus-xsum'
_a = [' Sam ate lunch today.', 'Sams lunch ingredients.']
_a = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
_a = 'patrickvonplaten/t5-tiny-random'
_a = 'sshleifer/bart-tiny-random'
_a = 'sshleifer/tiny-mbart'
_a = 'sshleifer/tiny-marian-en-de'
def _A ( UpperCamelCase_ : Path, UpperCamelCase_ : list) -> Optional[Any]:
'''simple docstring'''
__lowercase = "\n".join(UpperCamelCase_)
Path(UpperCamelCase_).open("w").writelines(UpperCamelCase_)
def _A ( UpperCamelCase_ : Union[str, Any]) -> int:
'''simple docstring'''
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(UpperCamelCase_, F"""{split}.source"""), UpperCamelCase_)
_dump_articles(os.path.join(UpperCamelCase_, F"""{split}.target"""), UpperCamelCase_)
return tmp_dir
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
], )
@slow
def _lowercase ( self : List[Any], UpperCAmelCase__ : Optional[Any] ):
__lowercase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
__lowercase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__lowercase = max(len(tokenizer.encode(UpperCAmelCase__ ) ) for a in ARTICLES )
__lowercase = max(len(tokenizer.encode(UpperCAmelCase__ ) ) for a in SUMMARIES )
__lowercase = 4
__lowercase = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__lowercase ,__lowercase = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error.
__lowercase = SeqaSeqDataset(
UpperCAmelCase__, data_dir=UpperCAmelCase__, type_path="train", max_source_length=UpperCAmelCase__, max_target_length=UpperCAmelCase__, src_lang=UpperCAmelCase__, tgt_lang=UpperCAmelCase__, )
__lowercase = DataLoader(UpperCAmelCase__, batch_size=2, collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(UpperCAmelCase__, UpperCAmelCase__ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__lowercase = shift_tokens_right(batch["labels"], tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def _lowercase ( self : Dict, UpperCAmelCase__ : int ):
__lowercase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
__lowercase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__lowercase = max(len(tokenizer.encode(UpperCAmelCase__ ) ) for a in ARTICLES )
__lowercase = max(len(tokenizer.encode(UpperCAmelCase__ ) ) for a in SUMMARIES )
__lowercase = 4
__lowercase = LegacySeqaSeqDataset(
UpperCAmelCase__, data_dir=UpperCAmelCase__, type_path="train", max_source_length=2_0, max_target_length=UpperCAmelCase__, )
__lowercase = DataLoader(UpperCAmelCase__, batch_size=2, collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def _lowercase ( self : str ):
__lowercase = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" )
__lowercase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__lowercase = tmp_dir.joinpath("train.source" ).open().readlines()
__lowercase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(UpperCAmelCase__, UpperCAmelCase__, 1_2_8, UpperCAmelCase__ )
__lowercase = {x.name for x in tmp_dir.iterdir()}
__lowercase = {x.name for x in save_dir.iterdir()}
__lowercase = save_dir.joinpath("train.source" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(UpperCAmelCase__ ) < len(UpperCAmelCase__ )
assert len(UpperCAmelCase__ ) == 1
assert len(packed_examples[0] ) == sum(len(UpperCAmelCase__ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq" )
def _lowercase ( self : List[str] ):
if not FAIRSEQ_AVAILABLE:
return
__lowercase ,__lowercase ,__lowercase = self._get_dataset(max_len=6_4 )
__lowercase = 6_4
__lowercase = ds.make_dynamic_sampler(UpperCAmelCase__, required_batch_size_multiple=UpperCAmelCase__ )
__lowercase = [len(UpperCAmelCase__ ) for x in batch_sampler]
assert len(set(UpperCAmelCase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) # no dropped or added examples
__lowercase = DataLoader(UpperCAmelCase__, batch_sampler=UpperCAmelCase__, collate_fn=ds.collate_fn, num_workers=2 )
__lowercase = []
__lowercase = []
for batch in data_loader:
__lowercase = batch["input_ids"].shape
__lowercase = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__lowercase = np.product(batch["input_ids"].shape )
num_src_per_batch.append(UpperCAmelCase__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(UpperCAmelCase__ )
assert num_src_per_batch[0] == max(UpperCAmelCase__ )
if failures:
raise AssertionError(F"""too many tokens in {len(UpperCAmelCase__ )} batches""" )
def _lowercase ( self : Any ):
__lowercase ,__lowercase ,__lowercase = self._get_dataset(max_len=5_1_2 )
__lowercase = 2
__lowercase = ds.make_sortish_sampler(UpperCAmelCase__, shuffle=UpperCAmelCase__ )
__lowercase = DataLoader(UpperCAmelCase__, batch_size=UpperCAmelCase__, collate_fn=ds.collate_fn, num_workers=2 )
__lowercase = DataLoader(UpperCAmelCase__, batch_size=UpperCAmelCase__, collate_fn=ds.collate_fn, num_workers=2, sampler=UpperCAmelCase__ )
__lowercase = tokenizer.pad_token_id
def count_pad_tokens(UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any]="input_ids" ):
return [batch[k].eq(UpperCAmelCase__ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(UpperCAmelCase__, k="labels" ) ) < sum(count_pad_tokens(UpperCAmelCase__, k="labels" ) )
assert sum(count_pad_tokens(UpperCAmelCase__ ) ) < sum(count_pad_tokens(UpperCAmelCase__ ) )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ )
def _lowercase ( self : List[Any], UpperCAmelCase__ : int=1_0_0_0, UpperCAmelCase__ : str=1_2_8 ):
if os.getenv("USE_REAL_DATA", UpperCAmelCase__ ):
__lowercase = "examples/seq2seq/wmt_en_ro"
__lowercase = max_len * 2 * 6_4
if not Path(UpperCAmelCase__ ).joinpath("train.len" ).exists():
save_len_file(UpperCAmelCase__, UpperCAmelCase__ )
else:
__lowercase = "examples/seq2seq/test_data/wmt_en_ro"
__lowercase = max_len * 4
save_len_file(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
__lowercase = SeqaSeqDataset(
UpperCAmelCase__, data_dir=UpperCAmelCase__, type_path="train", max_source_length=UpperCAmelCase__, max_target_length=UpperCAmelCase__, n_obs=UpperCAmelCase__, )
return ds, max_tokens, tokenizer
def _lowercase ( self : Union[str, Any] ):
__lowercase ,__lowercase ,__lowercase = self._get_dataset()
__lowercase = set(DistributedSortishSampler(UpperCAmelCase__, 2_5_6, num_replicas=2, rank=0, add_extra_examples=UpperCAmelCase__ ) )
__lowercase = set(DistributedSortishSampler(UpperCAmelCase__, 2_5_6, num_replicas=2, rank=1, add_extra_examples=UpperCAmelCase__ ) )
assert idsa.intersection(UpperCAmelCase__ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
], )
def _lowercase ( self : int, UpperCAmelCase__ : Optional[Any] ):
__lowercase = AutoTokenizer.from_pretrained(UpperCAmelCase__, use_fast=UpperCAmelCase__ )
if tok_name == MBART_TINY:
__lowercase = SeqaSeqDataset(
UpperCAmelCase__, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ), type_path="train", max_source_length=4, max_target_length=8, src_lang="EN", tgt_lang="FR", )
__lowercase = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__lowercase = SeqaSeqDataset(
UpperCAmelCase__, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ), type_path="train", max_source_length=4, max_target_length=8, )
__lowercase = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(UpperCAmelCase__ ) == 1 if tok_name == BART_TINY else len(UpperCAmelCase__ ) == 0
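# --- Illustration (not part of the tests above) ---
# A minimal sketch of the "sortish" sampling idea the pad-token assertions
# above exercise: shuffle the indices globally, then sort each chunk by
# sequence length so that examples of similar length land in the same batch
# and less padding is needed. `sortish_indices` and the chunking factor are
# hypothetical simplifications of the real sampler.
import random

def sortish_indices(lengths, batch_size, chunk_factor=50):
    indices = list(range(len(lengths)))
    random.shuffle(indices)  # global randomness between epochs
    chunk = batch_size * chunk_factor
    out = []
    for start in range(0, len(indices), chunk):
        block = indices[start : start + chunk]
        # local homogeneity: longest-first within each chunk
        out.extend(sorted(block, key=lambda i: lengths[i], reverse=True))
    return out

# Batches drawn in this order mix lengths globally but are locally homogeneous,
# which is why sum(count_pad_tokens(...)) drops in the sortish test above.
print(sortish_indices([3, 17, 4, 18, 2, 16], batch_size=2, chunk_factor=1))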
| 144
| 1
|
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
UpperCamelCase__ = logging.getLogger(__name__)
@dataclass
class lowerCamelCase_ :
lowerCAmelCase__ = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
lowerCAmelCase__ = field(
default=__a , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
lowerCAmelCase__ = field(
default=__a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=__a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
lowerCAmelCase__ = field(
default=__a , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
@dataclass
class lowerCamelCase_ :
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Train language if it is different from the evaluation language.'} )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
lowerCAmelCase__ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase__ = field(
default=__a , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCAmelCase__ = field(
default=__a , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def a__ ( ) -> int:
UpperCAmelCase__ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''' , _lowerCAmelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase__ : List[str] = training_args.get_process_log_level()
logger.setLevel(_lowerCAmelCase )
datasets.utils.logging.set_verbosity(_lowerCAmelCase )
transformers.utils.logging.set_verbosity(_lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
UpperCAmelCase__ : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase__ : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
UpperCAmelCase__ : List[Any] = load_dataset(
'''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
UpperCAmelCase__ : int = load_dataset(
'''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase__ : Dict = train_dataset.features['''label'''].names
if training_args.do_eval:
UpperCAmelCase__ : Dict = load_dataset(
'''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase__ : Tuple = eval_dataset.features['''label'''].names
if training_args.do_predict:
UpperCAmelCase__ : List[Any] = load_dataset(
'''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase__ : Dict = predict_dataset.features['''label'''].names
# Labels
UpperCAmelCase__ : List[str] = len(_lowerCAmelCase )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase__ : str = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_lowerCAmelCase , id2label={str(i ): label for i, label in enumerate(_lowerCAmelCase )} , label2id={label: i for i, label in enumerate(_lowerCAmelCase )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
UpperCAmelCase__ : Union[str, Any] = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
UpperCAmelCase__ : Any = False
def preprocess_function(lowerCAmelCase__ ):
# Tokenize the texts
return tokenizer(
examples['''premise'''] , examples['''hypothesis'''] , padding=_lowerCAmelCase , max_length=data_args.max_seq_length , truncation=_lowerCAmelCase , )
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCAmelCase__ : Dict = min(len(_lowerCAmelCase ) , data_args.max_train_samples )
UpperCAmelCase__ : Tuple = train_dataset.select(range(_lowerCAmelCase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
UpperCAmelCase__ : str = train_dataset.map(
_lowerCAmelCase , batched=_lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(_lowerCAmelCase ) ) , 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCAmelCase__ : List[str] = min(len(_lowerCAmelCase ) , data_args.max_eval_samples )
UpperCAmelCase__ : List[str] = eval_dataset.select(range(_lowerCAmelCase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
UpperCAmelCase__ : List[Any] = eval_dataset.map(
_lowerCAmelCase , batched=_lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
UpperCAmelCase__ : str = min(len(_lowerCAmelCase ) , data_args.max_predict_samples )
UpperCAmelCase__ : List[str] = predict_dataset.select(range(_lowerCAmelCase ) )
with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
UpperCAmelCase__ : str = predict_dataset.map(
_lowerCAmelCase , batched=_lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
# Get the metric function
UpperCAmelCase__ : List[Any] = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase__ ):
UpperCAmelCase__ : Optional[int] = p.predictions[0] if isinstance(p.predictions , _lowerCAmelCase ) else p.predictions
UpperCAmelCase__ : Optional[Any] = np.argmax(_lowerCAmelCase , axis=1 )
return metric.compute(predictions=_lowerCAmelCase , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
UpperCAmelCase__ : List[Any] = default_data_collator
    elif training_args.fp16:
UpperCAmelCase__ : Dict = DataCollatorWithPadding(_lowerCAmelCase , pad_to_multiple_of=8 )
else:
UpperCAmelCase__ : Optional[Any] = None
# Initialize our Trainer
UpperCAmelCase__ : str = Trainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , data_collator=_lowerCAmelCase , )
# Training
if training_args.do_train:
UpperCAmelCase__ : List[Any] = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase__ : Union[str, Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase__ : int = last_checkpoint
UpperCAmelCase__ : List[Any] = trainer.train(resume_from_checkpoint=_lowerCAmelCase )
UpperCAmelCase__ : int = train_result.metrics
UpperCAmelCase__ : Optional[int] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowerCAmelCase )
)
UpperCAmelCase__ : Union[str, Any] = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , _lowerCAmelCase )
trainer.save_metrics('''train''' , _lowerCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
UpperCAmelCase__ : List[str] = trainer.evaluate(eval_dataset=_lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowerCAmelCase )
UpperCAmelCase__ : str = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.log_metrics('''eval''' , _lowerCAmelCase )
trainer.save_metrics('''eval''' , _lowerCAmelCase )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = trainer.predict(_lowerCAmelCase , metric_key_prefix='''predict''' )
UpperCAmelCase__ : Any = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(_lowerCAmelCase )
)
UpperCAmelCase__ : int = min(_lowerCAmelCase , len(_lowerCAmelCase ) )
trainer.log_metrics('''predict''' , _lowerCAmelCase )
trainer.save_metrics('''predict''' , _lowerCAmelCase )
UpperCAmelCase__ : Tuple = np.argmax(_lowerCAmelCase , axis=1 )
UpperCAmelCase__ : Optional[Any] = os.path.join(training_args.output_dir , '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(_lowerCAmelCase , '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(_lowerCAmelCase ):
UpperCAmelCase__ : Optional[Any] = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
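# --- Illustration (not part of the script above) ---
# A minimal, framework-free sketch of the prediction post-processing done in
# the do_predict branch: take the argmax over class logits and map each row
# back to the dataset's label names. The logits are toy values; the XNLI label
# order shown here (0=entailment, 1=neutral, 2=contradiction) is assumed.
import numpy as np

label_list = ["entailment", "neutral", "contradiction"]
logits = np.array([[2.1, 0.3, -1.0], [-0.5, 0.1, 1.7]])
predictions = np.argmax(logits, axis=1)
for index, item in enumerate(predictions):
    print(f"{index}\t{label_list[item]}")  # mirrors the predictions.txt lines written above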
| 181
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = """▁"""
lowerCamelCase = {"""vocab_file""": """spiece.model"""}
lowerCamelCase = {
"""vocab_file""": {
"""google/reformer-crime-and-punishment""": (
"""https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
)
}
}
lowerCamelCase = {
"""google/reformer-crime-and-punishment""": 52_4288,
}
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any]="</s>" , _lowerCAmelCase : Any="<unk>" , _lowerCAmelCase : int=[] , _lowerCAmelCase : Optional[Dict[str, Any]] = None , **_lowerCAmelCase : List[Any] , ):
'''simple docstring'''
__lowercase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
__lowercase =vocab_file
__lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_lowerCAmelCase)
@property
def __lowerCamelCase ( self : int):
'''simple docstring'''
return self.sp_model.get_piece_size()
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__lowercase ={self.convert_ids_to_tokens(_lowerCAmelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Any):
'''simple docstring'''
__lowercase =self.__dict__.copy()
__lowercase =None
return state
def __setstate__( self : Optional[int] , _lowerCAmelCase : Union[str, Any]):
'''simple docstring'''
__lowercase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
__lowercase ={}
__lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : str):
'''simple docstring'''
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase)
def __lowerCamelCase ( self : Optional[Any] , _lowerCAmelCase : List[Any]):
'''simple docstring'''
return self.sp_model.piece_to_id(_lowerCAmelCase)
def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : Optional[Any]):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
__lowercase =self.sp_model.IdToPiece(_lowerCAmelCase)
return token
def __lowerCamelCase ( self : Any , _lowerCAmelCase : Optional[int]):
'''simple docstring'''
__lowercase =[]
__lowercase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCAmelCase) + token
__lowercase =[]
else:
current_sub_tokens.append(_lowerCAmelCase)
out_string += self.sp_model.decode(_lowerCAmelCase)
return out_string.strip()
def __lowerCamelCase ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None):
'''simple docstring'''
if not os.path.isdir(_lowerCAmelCase):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
__lowercase =os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_lowerCAmelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _lowerCAmelCase)
elif not os.path.isfile(self.vocab_file):
with open(_lowerCAmelCase , 'wb') as fi:
__lowercase =self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase)
return (out_vocab_file,)
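# --- Illustration (not part of the tokenizer above) ---
# A minimal sketch of the buffering pattern used by the detokenization method
# above: sentencepiece pieces are accumulated in a buffer that is flushed
# through the model's decoder whenever a special token (which the model must
# not decode) is encountered. `decode_pieces` stands in for `sp_model.decode`;
# the function name and toy decoder are hypothetical.
def join_pieces(tokens, special_tokens, decode_pieces):
    out_string = ""
    current_sub_tokens = []
    for token in tokens:
        if token in special_tokens:
            # flush the buffered pieces, then append the special token verbatim
            out_string += decode_pieces(current_sub_tokens) + token
            current_sub_tokens = []
        else:
            current_sub_tokens.append(token)
    out_string += decode_pieces(current_sub_tokens)
    return out_string.strip()

# With a toy decoder that turns the "▁" word marker back into a space:
toy_decode = lambda pieces: "".join(pieces).replace("▁", " ")
print(join_pieces(["▁Hello", "▁world", "</s>"], {"</s>"}, toy_decode))  # "Hello world</s>"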
| 166
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = ["pixel_values"]
def __init__( self : List[str] , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : float = None , __snake_case : PILImageResampling = PILImageResampling.BILINEAR , __snake_case : bool = True , __snake_case : Union[int, float] = 1 / 2_55 , __snake_case : bool = True , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , **__snake_case : Any , )-> None:
super().__init__(**__snake_case )
snake_case = size if size is not None else {"""shortest_edge""": 3_84}
snake_case = get_size_dict(__snake_case , default_to_square=__snake_case )
snake_case = do_resize
snake_case = size
# Default value set here for backwards compatibility where the value in config is None
snake_case = crop_pct if crop_pct is not None else 2_24 / 2_56
snake_case = resample
snake_case = do_rescale
snake_case = rescale_factor
snake_case = do_normalize
snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowerCAmelCase ( self : int , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : float , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : str , )-> np.ndarray:
snake_case = get_size_dict(__snake_case , default_to_square=__snake_case )
if "shortest_edge" not in size:
raise ValueError(f'''Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}''' )
snake_case = size["""shortest_edge"""]
if shortest_edge < 3_84:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
snake_case = int(shortest_edge / crop_pct )
snake_case = get_resize_output_image_size(__snake_case , size=__snake_case , default_to_square=__snake_case )
snake_case = resize(image=__snake_case , size=__snake_case , resample=__snake_case , data_format=__snake_case , **__snake_case )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__snake_case , size=(shortest_edge, shortest_edge) , data_format=__snake_case , **__snake_case )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__snake_case , size=(shortest_edge, shortest_edge) , resample=__snake_case , data_format=__snake_case , **__snake_case )
def lowerCAmelCase ( self : List[Any] , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Dict , )-> Any:
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def lowerCAmelCase ( self : List[str] , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Dict , )-> np.ndarray:
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def lowerCAmelCase ( self : Optional[int] , __snake_case : ImageInput , __snake_case : bool = None , __snake_case : Dict[str, int] = None , __snake_case : float = None , __snake_case : PILImageResampling = None , __snake_case : bool = None , __snake_case : float = None , __snake_case : bool = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[float, List[float]]] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : ChannelDimension = ChannelDimension.FIRST , **__snake_case : Optional[int] , )-> PIL.Image.Image:
snake_case = do_resize if do_resize is not None else self.do_resize
snake_case = crop_pct if crop_pct is not None else self.crop_pct
snake_case = resample if resample is not None else self.resample
snake_case = do_rescale if do_rescale is not None else self.do_rescale
snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case = do_normalize if do_normalize is not None else self.do_normalize
snake_case = image_mean if image_mean is not None else self.image_mean
snake_case = image_std if image_std is not None else self.image_std
snake_case = size if size is not None else self.size
snake_case = get_size_dict(__snake_case , default_to_square=__snake_case )
snake_case = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
snake_case = [self.resize(image=__snake_case , size=__snake_case , crop_pct=__snake_case , resample=__snake_case ) for image in images]
if do_rescale:
snake_case = [self.rescale(image=__snake_case , scale=__snake_case ) for image in images]
if do_normalize:
snake_case = [self.normalize(image=__snake_case , mean=__snake_case , std=__snake_case ) for image in images]
snake_case = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
snake_case = {"""pixel_values""": images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
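# --- Illustration (not part of the processor above) ---
# A minimal sketch of the resize-size arithmetic in `resize` above: below the
# 384 threshold the shortest edge is first scaled to shortest_edge / crop_pct,
# so that a subsequent center crop of (shortest_edge, shortest_edge) keeps
# roughly `crop_pct` of the content; at 384 and above the image is warped
# directly to a square. Pure arithmetic, no image libraries; the function name
# is hypothetical.
def convnext_resize_plan(shortest_edge: int, crop_pct: float = 224 / 256):
    if shortest_edge < 384:
        resize_shortest_edge = int(shortest_edge / crop_pct)
        return {"resize_shortest_edge": resize_shortest_edge,
                "center_crop": (shortest_edge, shortest_edge)}
    return {"warp_to": (shortest_edge, shortest_edge)}

print(convnext_resize_plan(224))  # resize shortest edge to 256, then crop 224x224
print(convnext_resize_plan(384))  # warp straight to 384x384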
| 3
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def get_citation ( base_url : str , params : dict ) -> str:
    """Return the citation count shown on a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , """html.parser""" )
    div = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
    anchors = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
    return anchors[2].get_text()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 3
| 1
|
import random
def random_graph ( vertices_number : int , probability : float , directed : bool = False ) -> dict:
    graph : dict = {i: [] for i in range(vertices_number )}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from i to j
    # if the randomly generated number is lower than probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, also add the reverse edge from j to i
                    graph[j].append(i )
    return graph
def complete_graph ( vertices_number : int ) -> dict:
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
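# --- Usage sketch (assumes the functions above) ---
# For an undirected Erdős–Rényi-style graph, each of the C(n, 2) node pairs is
# connected independently with the given probability, so the expected edge
# count is probability * n * (n - 1) / 2. A quick empirical check:
if __name__ == "__main__":
    demo = random_graph(100, 0.5)
    edges = sum(len(neighbors) for neighbors in demo.values()) // 2
    print(f"observed {edges} edges, expected ~{0.5 * 100 * 99 / 2:.0f}")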
| 336
|
'''simple docstring'''
import os
import pytest
from attr import dataclass
_snake_case = 'us-east-1' # defaults region
@dataclass
class a__ :
_SCREAMING_SNAKE_CASE : str
_SCREAMING_SNAKE_CASE : str = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
_SCREAMING_SNAKE_CASE : Dict = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 500,
'save_steps': 5500,
}
_SCREAMING_SNAKE_CASE : List[str] = {**hyperparameters, 'max_steps': 1000}
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
        return f'''{self.framework}-transformers-test'''
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def _A ( snake_case ) -> Tuple:
_lowercase : List[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 250
| 0
|
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCamelCase ( lowerCamelCase__ ):
def __init__( self, lowercase_, lowercase_ = None, lowercase_ = None, lowercase_ = True, lowercase_ = None, lowercase_ = False, lowercase_ = None, lowercase_ = True, lowercase_ = "arrow", **lowercase_, ) -> str:
super().__init__(
split=lowercase_, features=lowercase_, cache_dir=lowercase_, keep_in_memory=lowercase_, streaming=lowercase_, **lowercase_, )
snake_case = load_from_cache_file
snake_case = file_format
snake_case = Spark(
df=lowercase_, features=lowercase_, cache_dir=lowercase_, working_dir=lowercase_, **lowercase_, )
def _lowerCamelCase ( self ) -> Optional[int]:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
snake_case = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowercase_, file_format=self._file_format, )
return self.builder.as_dataset(split=self.split )
| 354
|
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase :
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(__lowerCAmelCase )} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} )
snake_case_ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case_ = field(
default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , )
snake_case_ = field(
default=64 , metadata={
'''help''': (
'''The maximum number of tokens for the question. Questions longer than this will '''
'''be truncated to this length.'''
)
} , )
snake_case_ = field(
default=30 , metadata={
'''help''': (
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
)
} , )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
snake_case_ = field(
default=__lowerCAmelCase , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} )
snake_case_ = field(
default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} )
snake_case_ = field(
default=0 , metadata={
'''help''': (
'''language id of input for language-specific xlm models (see'''
''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'''
)
} , )
snake_case_ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} )
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = '''train'''
snake_case_ = '''dev'''
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
snake_case_ = 42
def __init__( self, lowercase_, lowercase_, lowercase_ = None, lowercase_ = Split.train, lowercase_ = False, lowercase_ = None, lowercase_ = "pt", ) -> int:
snake_case = args
snake_case = is_language_sensitive
        snake_case = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
if isinstance(lowercase_, lowercase_ ):
try:
snake_case = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
snake_case = mode
# Load data features from cache or dataset file
snake_case = 'v2' if args.version_2_with_negative else 'v1'
snake_case = os.path.join(
cache_dir if cache_dir is not None else args.data_dir, F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''', )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case = cached_features_file + '.lock'
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
snake_case = time.time()
snake_case = torch.load(lowercase_ )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
snake_case = self.old_features['features']
snake_case = self.old_features.get('dataset', lowercase_ )
snake_case = self.old_features.get('examples', lowercase_ )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''', time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
' future run' )
else:
if mode == Split.dev:
snake_case = self.processor.get_dev_examples(args.data_dir )
else:
snake_case = self.processor.get_train_examples(args.data_dir )
snake_case , snake_case = squad_convert_examples_to_features(
examples=self.examples, tokenizer=lowercase_, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=lowercase_, )
snake_case = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples}, lowercase_, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> Tuple:
return len(self.features )
def __getitem__( self, lowercase_ ) -> Dict[str, torch.Tensor]:
# Convert to Tensors and build dataset
snake_case = self.features[i]
snake_case = torch.tensor(feature.input_ids, dtype=torch.long )
snake_case = torch.tensor(feature.attention_mask, dtype=torch.long )
snake_case = torch.tensor(feature.token_type_ids, dtype=torch.long )
snake_case = torch.tensor(feature.cls_index, dtype=torch.long )
snake_case = torch.tensor(feature.p_mask, dtype=torch.float )
snake_case = torch.tensor(feature.is_impossible, dtype=torch.float )
snake_case = {
'input_ids': input_ids,
'attention_mask': attention_mask,
'token_type_ids': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'is_impossible': is_impossible} )
if self.is_language_sensitive:
            inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.int64 ) * self.args.lang_id)} )
if self.mode == Split.train:
snake_case = torch.tensor(feature.start_position, dtype=torch.long )
snake_case = torch.tensor(feature.end_position, dtype=torch.long )
inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
return inputs
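# --- Illustration (not part of the dataset class above) ---
# A minimal sketch of the lock-guarded caching pattern used in __init__ above:
# in distributed training only the first process that acquires the lock pays
# the preprocessing cost; every other process finds the finished cache file
# and simply loads it. `build_features` and the pickle-based cache are
# hypothetical stand-ins for the torch.save/torch.load calls in the real code.
import os
import pickle
from filelock import FileLock

def load_or_build(cache_path, build_features, overwrite=False):
    with FileLock(cache_path + ".lock"):
        if os.path.exists(cache_path) and not overwrite:
            with open(cache_path, "rb") as f:
                return pickle.load(f)  # another process already built the cache
        features = build_features()
        with open(cache_path, "wb") as f:
            pickle.dump(features, f)
        return features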
| 332
| 0
|
from ....utils import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
class __lowerCAmelCase ( A__):
def __init__( self: List[Any] , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: int=None , _lowerCAmelCase: int=20_48 ):
lowercase :Optional[Any] = config.__dict__
lowercase :Union[str, Any] = modal_hidden_size
if num_labels:
lowercase :List[str] = num_labels
| 236
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
a_ : Tuple = logging.getLogger()
def lowerCamelCase__ ():
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('-f')
SCREAMING_SNAKE_CASE = parser.parse_args()
return args.f
class _snake_case ( A__ ):
def SCREAMING_SNAKE_CASE__ ( self) -> None:
SCREAMING_SNAKE_CASE = logging.StreamHandler(sys.stdout)
logger.addHandler(a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> str:
SCREAMING_SNAKE_CASE = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , 'run_glue_deebert.py')
with patch.object(a , 'argv' , a):
SCREAMING_SNAKE_CASE = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(a , 0.6_66)
@slow
@require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
        self.run_and_check(train_args)

        eval_args = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(eval_args)

        entropy_eval_args = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
        self.run_and_check(entropy_eval_args)
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find the root of `func` (a string expression in x), starting from point `a`,
    using the Newton-Raphson iteration x_{n+1} = x_n - f(x_n) / f'(x_n)."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
    # Find value of e (root of log(x) - 1)
print(F'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
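# Minimal usage sketch (requires the `datasets` package; the column names
# below are illustrative, not part of this module):
#
#     from datasets.tasks import Summarization
#     task = Summarization(text_column="article", summary_column="highlights")
#     assert task.column_mapping == {"article": "text", "highlights": "summary"}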
'''simple docstring'''
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowercase_ = """scheduler_config.json"""
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = 3
UpperCamelCase = 4
UpperCamelCase = 5
UpperCamelCase = 6
UpperCamelCase = 7
UpperCamelCase = 8
UpperCamelCase = 9
UpperCamelCase = 10
UpperCamelCase = 11
UpperCamelCase = 12
UpperCamelCase = 13
UpperCamelCase = 14
@dataclass
class SchedulerOutput(BaseOutput):
    """Base class for the output of a scheduler's `step` function."""

    prev_sample: torch.FloatTensor
class SchedulerMixin:
    """Mixin containing common scheduler functionality: config loading/saving and compatibility lookup."""

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
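# Minimal usage sketch (assumes a concrete diffusers scheduler subclass, which
# supplies `load_config`/`from_config` via its config mixin; the repo name is
# illustrative):
#
#     from diffusers import DDPMScheduler
#     scheduler = DDPMScheduler.from_pretrained("google/ddpm-cat-256")
#     print(scheduler.compatibles)  # scheduler classes that can reuse this config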
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
lowerCamelCase_ : Optional[int] = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
lowerCamelCase_ : Optional[Any] = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
 p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
lowerCamelCase_ : Optional[int] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCAmelCase :List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
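# Minimal usage sketch (default sizes shown; the dummy image is illustrative):
#
#     from PIL import Image
#     processor = CLIPImageProcessor()
#     batch = processor(Image.new("RGB", (640, 480)), return_tensors="np")
#     # batch["pixel_values"].shape == (1, 3, 224, 224)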
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w(h, w, scale_factor=8):
    """Round h and w up to multiples of scale_factor**2, then divide by scale_factor."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
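# Worked example of the rounding above: with scale_factor=8, each side is
# rounded up to a multiple of 64 and then divided by 8 (the latent grid size):
#
#     >>> get_new_h_w(768, 768, scale_factor=8)
#     (96, 96)
#     >>> get_new_h_w(512, 512, scale_factor=8)
#     (64, 64)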
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
def __lowerCAmelCase ( self : List[Any] , _A : List[str] , _A : List[str] , _A : List[str] , _A : List[Any] , _A : str=None , ) -> Dict:
__magic_name__ : Optional[Any] = len(_A ) if isinstance(_A , _A ) else 1
# get prompt text embeddings
__magic_name__ : str = self.tokenizer(
_A , padding='max_length' , truncation=_A , max_length=77 , return_attention_mask=_A , add_special_tokens=_A , return_tensors='pt' , )
__magic_name__ : Optional[Any] = text_inputs.input_ids
__magic_name__ : Optional[Any] = self.tokenizer(_A , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_A , _A ):
__magic_name__ : str = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__magic_name__ : Union[str, Any] = text_input_ids.to(_A )
__magic_name__ : Dict = text_inputs.attention_mask.to(_A )
__magic_name__ , __magic_name__ : str = self.text_encoder(
input_ids=_A , attention_mask=_A )
__magic_name__ : Tuple = prompt_embeds.repeat_interleave(_A , dim=0 )
__magic_name__ : int = text_encoder_hidden_states.repeat_interleave(_A , dim=0 )
__magic_name__ : Union[str, Any] = text_mask.repeat_interleave(_A , dim=0 )
if do_classifier_free_guidance:
__magic_name__ : List[str]
if negative_prompt is None:
__magic_name__ : Optional[Any] = [''] * batch_size
elif type(_A ) is not type(_A ):
raise TypeError(
F'`negative_prompt` should be the same type to `prompt`, but got {type(_A )} !='
F' {type(_A )}.' )
elif isinstance(_A , _A ):
__magic_name__ : int = [negative_prompt]
elif batch_size != len(_A ):
raise ValueError(
F'`negative_prompt`: {negative_prompt} has batch size {len(_A )}, but `prompt`:'
F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
' the batch size of `prompt`.' )
else:
__magic_name__ : Dict = negative_prompt
__magic_name__ : List[str] = self.tokenizer(
_A , padding='max_length' , max_length=77 , truncation=_A , return_attention_mask=_A , add_special_tokens=_A , return_tensors='pt' , )
__magic_name__ : Optional[int] = uncond_input.input_ids.to(_A )
__magic_name__ : Optional[Any] = uncond_input.attention_mask.to(_A )
__magic_name__ , __magic_name__ : int = self.text_encoder(
input_ids=_A , attention_mask=_A )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__magic_name__ : List[str] = negative_prompt_embeds.shape[1]
__magic_name__ : str = negative_prompt_embeds.repeat(1 , _A )
__magic_name__ : Dict = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _A )
__magic_name__ : Any = uncond_text_encoder_hidden_states.shape[1]
__magic_name__ : Optional[int] = uncond_text_encoder_hidden_states.repeat(1 , _A , 1 )
__magic_name__ : Tuple = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _A , -1 )
__magic_name__ : List[Any] = uncond_text_mask.repeat_interleave(_A , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__magic_name__ : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
__magic_name__ : str = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__magic_name__ : str = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__( self : int , _A : Union[str, List[str]] , _A : Union[torch.FloatTensor, List[torch.FloatTensor]] , _A : Union[torch.FloatTensor, List[torch.FloatTensor]] , _A : Optional[Union[str, List[str]]] = None , _A : int = 512 , _A : int = 512 , _A : int = 100 , _A : float = 4.0 , _A : int = 1 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , ) -> Optional[int]:
if isinstance(_A , _A ):
__magic_name__ : Optional[int] = 1
elif isinstance(_A , _A ):
__magic_name__ : Union[str, Any] = len(_A )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(_A )}' )
__magic_name__ : Tuple = self._execution_device
__magic_name__ : Any = batch_size * num_images_per_prompt
__magic_name__ : int = guidance_scale > 1.0
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = self._encode_prompt(
_A , _A , _A , _A , _A )
if isinstance(_A , _A ):
__magic_name__ : Union[str, Any] = torch.cat(_A , dim=0 )
if isinstance(_A , _A ):
__magic_name__ : Dict = torch.cat(_A , dim=0 )
if do_classifier_free_guidance:
__magic_name__ : Dict = image_embeds.repeat_interleave(_A , dim=0 )
__magic_name__ : Optional[int] = negative_image_embeds.repeat_interleave(_A , dim=0 )
__magic_name__ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_A )
self.scheduler.set_timesteps(_A , device=_A )
__magic_name__ : Tuple = self.scheduler.timesteps
__magic_name__ : Optional[int] = self.unet.config.in_channels
__magic_name__ , __magic_name__ : Dict = get_new_h_w(_A , _A , self.movq_scale_factor )
# create initial latent
__magic_name__ : Union[str, Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _A , _A , _A , self.scheduler , )
for i, t in enumerate(self.progress_bar(_A ) ):
# expand the latents if we are doing classifier free guidance
__magic_name__ : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__magic_name__ : Tuple = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
__magic_name__ : Union[str, Any] = self.unet(
sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
if do_classifier_free_guidance:
__magic_name__ , __magic_name__ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
__magic_name__ , __magic_name__ : Dict = noise_pred.chunk(2 )
__magic_name__ , __magic_name__ : List[str] = variance_pred.chunk(2 )
__magic_name__ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__magic_name__ : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__magic_name__ , __magic_name__ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__magic_name__ : List[Any] = self.scheduler.step(
_A , _A , _A , generator=_A , ).prev_sample
# post-processing
__magic_name__ : int = self.movq.decode(_A , force_not_quantize=_A )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
__magic_name__ : Dict = image * 0.5 + 0.5
__magic_name__ : str = image.clamp(0 , 1 )
__magic_name__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__magic_name__ : str = self.numpy_to_pil(_A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_A )
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E image-to-image pipeline."""

    images: Union[PIL.Image.Image, np.ndarray]
class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image,
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator=None,
        latents=None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: str = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the hash of a code file's content."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """Calculate mean and max line length of a file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in a file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Check if a file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if a file is a config file or a unit test: first a keyword scan
    of the leading lines, then the relative frequency of 'config'/'test'."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check if a file uses none of the common Python keywords."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check if a file contains at most `minimum` '=' symbols."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character/token ratio of a file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill the cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
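# Example of what the per-file heuristics produce on a toy sample
# (the file content is illustrative):
#
#     >>> toy = {"content": "x = 1\n# automatically generated\n"}
#     >>> is_autogenerated(toy)
#     {'autogenerated': True}
#     >>> line_stats(toy)["line_max"]   # longest of the two lines
#     25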
def filter(example, uniques, args):
    """Filter the dataset with heuristics."""
    if not check_uniques(example, uniques):
        return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
'''simple docstring'''
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info
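# Example with a synthetic job record (timestamps are illustrative):
#
#     >>> job = {"started_at": "2023-05-01T10:00:00Z", "completed_at": "2023-05-01T10:41:00Z"}
#     >>> extract_time_from_single_job(job)["duration"]   # minutes, rounded
#     41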
def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    """Convert a string representation of truth to `1` (true) or `0` (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
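# Behaviour sketch:
#
#     >>> strtobool("YES"), strtobool("off")
#     (1, 0)
#     >>> strtobool("maybe")
#     Traceback (most recent call last):
#         ...
#     ValueError: invalid truth value 'maybe'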
def is_tensor(x):
    """Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jnp.ndarray` or `np.ndarray`."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)
def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a numpy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a tensorflow symbolic tensor (i.e. not eager). Safe to call even if tensorflow is not installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
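# Quick check of the conversions (NumPy inputs shown; framework tensors follow
# the analogous branches):
#
#     >>> to_py_obj({"a": np.array([1, 2]), "b": (np.int64(3),)})
#     {'a': [1, 2], 'b': [3]}
#     >>> to_numpy([1.0, 2.0]).shape
#     (2,)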
class ModelOutput(OrderedDict):
    """Base class for all model outputs as dataclass. Allows indexing by integer or
    slice (like a tuple) or by string (like a dictionary); `None` attributes are skipped."""

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all the attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())
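# Minimal usage sketch with a hypothetical concrete subclass:
#
#     >>> from dataclasses import dataclass
#     >>> from typing import Optional
#     >>> @dataclass
#     ... class MyOutput(ModelOutput):
#     ...     loss: Optional[float] = None
#     ...     logits: Optional[list] = None
#     >>> out = MyOutput(logits=[1, 2])
#     >>> out["logits"], out.to_tuple()
#     ([1, 2], ([1, 2],))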
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
class ContextManagers:
    """Wrapper for `contextlib.ExitStack` which enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
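# Minimal usage sketch with standard-library context managers:
#
#     >>> import contextlib, io
#     >>> buf = io.StringIO()
#     >>> with ContextManagers([contextlib.redirect_stdout(buf)]):
#     ...     print("captured")
#     >>> buf.getvalue()
#     'captured\n'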
def can_return_loss(model_class):
    """Check whether a given model can return a loss from its signature."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels(model_class):
    """Find the labels used by a given model."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ = "" , lowerCAmelCase__ = "." ) -> Any:
def _flatten_dict(lowerCAmelCase__ , lowerCAmelCase__="" , lowerCAmelCase__="." ):
for k, v in d.items():
UpperCAmelCase__ : int = str(lowerCAmelCase__ ) + delimiter + str(lowerCAmelCase__ ) if parent_key else k
if v and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
yield from flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , delimiter=lowerCAmelCase__ ).items()
else:
yield key, v
return dict(_flatten_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
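# Quick check of the flattening behaviour (nested keys are joined with the
# delimiter):
#
#     >>> flatten_dict({"a": 1, "b": {"c": 2, "d": {"e": 3}}})
#     {'a': 1, 'b.c': 2, 'b.d.e': 3}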
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir=False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def a__ ( lowerCAmelCase__ , lowerCAmelCase__=None ) -> Optional[Any]:
if is_numpy_array(lowerCAmelCase__ ):
return np.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ )
elif is_torch_tensor(lowerCAmelCase__ ):
return array.T if axes is None else array.permute(*lowerCAmelCase__ )
elif is_tf_tensor(lowerCAmelCase__ ):
import tensorflow as tf
return tf.transpose(lowerCAmelCase__ , perm=lowerCAmelCase__ )
elif is_jax_tensor(lowerCAmelCase__ ):
return jnp.transpose(lowerCAmelCase__ , axes=lowerCAmelCase__ )
else:
raise ValueError(F"""Type not supported for transpose: {type(lowerCAmelCase__ )}.""" )
def reshape(array, newshape):
    """Framework-agnostic reshape for numpy arrays and torch/tf/jax tensors."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic squeeze for numpy arrays and torch/tf/jax tensors."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic expand_dims for numpy arrays and torch/tf/jax tensors."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic element count for numpy arrays and torch/tf/jax tensors."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
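# Smoke test for the framework-agnostic helpers above, exercising only the
# numpy branch (np is already used throughout this module):
#
#     x = np.ones((2, 3, 1))
#     transpose(x).shape        # (1, 3, 2)
#     reshape(x, (3, 2)).shape  # (3, 2)
#     squeeze(x).shape          # (2, 3)
#     expand_dims(x, 0).shape   # (1, 2, 3, 1)
#     tensor_size(x)            # 6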
def add_model_info_to_auto_map(auto_map, repo_id):
    """Prefix every entry of an auto map with the repo id it comes from."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map


def infer_framework(model_class):
    """Infer the framework ("tf", "pt" or "flax") of a model class from its MRO,
    without isinstance checks that would require importing every framework."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    raise TypeError(f"Could not infer framework from class {model_class}.")
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any , __lowercase : Union[str, Any] , __lowercase : List[str]=7 , __lowercase : str=3 , __lowercase : Dict=18 , __lowercase : str=30 , __lowercase : Any=4_00 , __lowercase : Tuple=True , __lowercase : int=None , __lowercase : Any=True , __lowercase : Any=None , __lowercase : List[Any]=True , ):
"""simple docstring"""
snake_case_ = size if size is not None else {"shortest_edge": 20}
snake_case_ = crop_size if crop_size is not None else {"height": 18, "width": 18}
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = image_size
snake_case_ = min_resolution
snake_case_ = max_resolution
snake_case_ = do_resize
snake_case_ = size
snake_case_ = do_center_crop
snake_case_ = crop_size
snake_case_ = do_flip_channel_order
def snake_case__ ( self : str ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = MobileViTImageProcessor if is_vision_available() else None
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = MobileViTImageProcessingTester(self )
@property
def snake_case__ ( self : List[str] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self : int ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "size" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_center_crop" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "center_crop" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_flip_channel_order" ) )
def snake_case__ ( self : Dict ):
"""simple docstring"""
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def snake_case__ ( self : Tuple ):
"""simple docstring"""
pass
def snake_case__ ( self : List[Any] ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def snake_case__ ( self : Any ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def snake_case__ ( self : List[str] ):
"""simple docstring"""
snake_case_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
snake_case_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
snake_case_ = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
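# A hedged usage sketch for the processor exercised above (the checkpoint id
# "apple/mobilevit-small" is an assumption; any MobileViT checkpoint works).
# MobileViT preprocessing can also flip RGB to BGR via do_flip_channel_order.
#
#     processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
#     batch = processor(images=Image.new("RGB", (64, 64)), return_tensors="pt")
#     batch["pixel_values"].shape  # (1, 3, crop_height, crop_width)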
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __a ( unittest.TestCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=18 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=400 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size if size is not None else {'height': 18, 'width': 20}
_UpperCAmelCase = do_thumbnail
_UpperCAmelCase = do_align_axis
_UpperCAmelCase = do_pad
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a ( UpperCAmelCase , unittest.TestCase ):
_a : List[str] = DonutImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = DonutImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_thumbnail' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_align_long_axis' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@is_flaky()
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase__ ( self ) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
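# Worked note on the size handling checked above: Donut configs historically
# stored size as (width, height), so from_dict(..., size=(42, 84)) means
# width=42, height=84 and is normalized to {"height": 84, "width": 42}, while
# a bare int like size=42 becomes the square {"height": 42, "width": 42}.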
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class __A ( A_ ):
'''simple docstring'''
lowerCAmelCase : int = "roberta"
def __init__( self : Dict ,_snake_case : Tuple=50_265 ,_snake_case : Any=768 ,_snake_case : Any=12 ,_snake_case : List[Any]=12 ,_snake_case : int=3_072 ,_snake_case : str="gelu" ,_snake_case : str=0.1 ,_snake_case : Tuple=0.1 ,_snake_case : List[Any]=512 ,_snake_case : Any=2 ,_snake_case : Dict=0.02 ,_snake_case : Tuple=1e-12 ,_snake_case : List[str]=1 ,_snake_case : Optional[Any]=0 ,_snake_case : List[Any]=2 ,_snake_case : Union[str, Any]="absolute" ,_snake_case : Union[str, Any]=True ,_snake_case : int=None ,**_snake_case : List[str] ,) -> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=_snake_case ,bos_token_id=_snake_case ,eos_token_id=_snake_case ,**_snake_case )
lowercase__ : str = vocab_size
lowercase__ : str = hidden_size
lowercase__ : List[str] = num_hidden_layers
lowercase__ : Tuple = num_attention_heads
lowercase__ : Dict = hidden_act
lowercase__ : Tuple = intermediate_size
lowercase__ : str = hidden_dropout_prob
lowercase__ : Any = attention_probs_dropout_prob
lowercase__ : Tuple = max_position_embeddings
lowercase__ : Tuple = type_vocab_size
lowercase__ : str = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : str = position_embedding_type
lowercase__ : str = use_cache
lowercase__ : List[str] = classifier_dropout
class __A ( A_ ):
'''simple docstring'''
@property
def UpperCAmelCase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
lowercase__ : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase__ : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
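# A minimal sketch of what the ONNX config above exposes. The readable class
# names (RobertaConfig / RobertaOnnxConfig) are assumptions for the two classes
# defined in this file; "roberta-base" is the stock checkpoint id listed above.
#
#     config = RobertaConfig.from_pretrained("roberta-base")
#     onnx_config = RobertaOnnxConfig(config)
#     onnx_config.inputs
#     # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#     #              ('attention_mask', {0: 'batch', 1: 'sequence'})])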
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[int]:
lowercase__ : Optional[int] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : List[str] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : Dict = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : List[str] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : List[Any] = 8
else:
lowercase__ : Optional[int] = None
return tokenizer.pad(
__lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase__ : Union[str, Any] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1":
lowercase__ : Any = 2
# Initialize accelerator
lowercase__ : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : List[Any] = config['''lr''']
lowercase__ : Union[str, Any] = int(config['''num_epochs'''] )
lowercase__ : List[str] = int(config['''seed'''] )
lowercase__ : Any = int(config['''batch_size'''] )
lowercase__ : int = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
    # We can now define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
def inner_training_loop(__lowerCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowerCamelCase )
        # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
lowercase__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : str = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : Optional[int] = AdamW(params=model.parameters() , lr=__lowerCamelCase )
lowercase__ , lowercase__ : List[str] = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate scheduler
lowercase__ : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : int = model(**__lowerCamelCase )
lowercase__ : Optional[int] = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Tuple = model(**__lowerCamelCase )
lowercase__ : Dict = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : Any = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
lowercase__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ : Union[str, Any] = parser.parse_args()
lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
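# Standalone sketch of the decorator's retry semantics (illustration only, not
# part of the training script above; it assumes accelerate retries with a
# halved batch size whenever the callable raises a CUDA out-of-memory error):
def _oom_retry_demo():
    from accelerate.utils import find_executable_batch_size

    @find_executable_batch_size(starting_batch_size=128)
    def run(batch_size):
        if batch_size > 32:  # stand-in for a real CUDA OOM at large sizes
            raise RuntimeError("CUDA out of memory.")
        return batch_size

    # Called with no arguments: the decorator supplies batch_size, going
    # 128 -> 64 -> 32 until the call succeeds.
    return run()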
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : int = logging.get_logger(__name__)
A__ : List[str] = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :List[Any] = "pix2struct_text_model"
_UpperCAmelCase :str = ["past_key_values"]
_UpperCAmelCase :str = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Dict , snake_case__ : Any=5_0244 , snake_case__ : Optional[int]=768 , snake_case__ : Dict=64 , snake_case__ : List[str]=2048 , snake_case__ : Dict=12 , snake_case__ : Any=12 , snake_case__ : Dict=32 , snake_case__ : int=128 , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=1E-6 , snake_case__ : Any=1.0 , snake_case__ : int="gelu_new" , snake_case__ : Optional[Any]=0 , snake_case__ : Any=False , snake_case__ : Any=0 , snake_case__ : Any=1 , snake_case__ : Optional[int]=False , snake_case__ : Tuple=True , **snake_case__ : Any , ):
lowerCamelCase_ : List[str] =vocab_size
lowerCamelCase_ : Tuple =hidden_size
lowerCamelCase_ : Optional[int] =d_kv
lowerCamelCase_ : List[Any] =d_ff
lowerCamelCase_ : Tuple =num_layers
lowerCamelCase_ : Optional[int] =num_heads
lowerCamelCase_ : Any =relative_attention_num_buckets
lowerCamelCase_ : Optional[int] =relative_attention_max_distance
lowerCamelCase_ : List[Any] =dropout_rate
lowerCamelCase_ : str =layer_norm_epsilon
lowerCamelCase_ : int =initializer_factor
lowerCamelCase_ : str =use_cache
lowerCamelCase_ : int =eos_token_id
lowerCamelCase_ : Optional[Any] =decoder_start_token_id
# for backwards compatibility
lowerCamelCase_ : Optional[Any] =dense_act_fn
super().__init__(
pad_token_id=snake_case__ , eos_token_id=snake_case__ , decoder_start_token_id=snake_case__ , tie_word_embeddings=snake_case__ , is_decoder=snake_case__ , **snake_case__ , )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , snake_case__ : Union[str, os.PathLike] , **snake_case__ : str ):
cls._set_token_in_kwargs(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ : Any =cls.get_config_dict(snake_case__ , **snake_case__ )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowerCamelCase_ : List[Any] =config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :List[Any] = "pix2struct_vision_model"
def __init__( self : Optional[int] , snake_case__ : Tuple=768 , snake_case__ : str=768 , snake_case__ : Union[str, Any]=2048 , snake_case__ : Tuple=64 , snake_case__ : List[Any]=12 , snake_case__ : Dict=12 , snake_case__ : int="gelu_new" , snake_case__ : str=1E-6 , snake_case__ : int=0.0 , snake_case__ : int=0.0 , snake_case__ : Dict=1E-10 , snake_case__ : Tuple=1.0 , snake_case__ : int=4096 , snake_case__ : Tuple=32 , snake_case__ : List[str]=128 , **snake_case__ : List[Any] , ):
super().__init__(**snake_case__ )
lowerCamelCase_ : int =hidden_size
lowerCamelCase_ : List[Any] =patch_embed_hidden_size
lowerCamelCase_ : Tuple =d_ff
lowerCamelCase_ : List[Any] =dropout_rate
lowerCamelCase_ : Dict =num_hidden_layers
lowerCamelCase_ : List[str] =num_attention_heads
lowerCamelCase_ : Optional[Any] =initializer_range
lowerCamelCase_ : int =initializer_factor
lowerCamelCase_ : Any =attention_dropout
lowerCamelCase_ : List[str] =layer_norm_eps
lowerCamelCase_ : int =dense_act_fn
lowerCamelCase_ : Optional[Any] =seq_len
lowerCamelCase_ : Optional[int] =relative_attention_num_buckets
lowerCamelCase_ : Optional[int] =relative_attention_max_distance
lowerCamelCase_ : Dict =d_kv
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Dict ):
cls._set_token_in_kwargs(snake_case__ )
lowerCamelCase_ , lowerCamelCase_ : Dict =cls.get_config_dict(snake_case__ , **snake_case__ )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
lowerCamelCase_ : List[Any] =config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(snake_case__ , **snake_case__ )
class lowercase__ ( snake_case__ ):
_UpperCAmelCase :str = "pix2struct"
_UpperCAmelCase :List[str] = True
def __init__( self : Tuple , snake_case__ : List[Any]=None , snake_case__ : Optional[Any]=None , snake_case__ : List[str]=1.0 , snake_case__ : List[Any]=0.02 , snake_case__ : List[Any]=False , snake_case__ : int=False , snake_case__ : Any=True , **snake_case__ : List[Any] , ):
super().__init__(tie_word_embeddings=snake_case__ , is_encoder_decoder=snake_case__ , **snake_case__ )
if text_config is None:
lowerCamelCase_ : Dict ={}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
lowerCamelCase_ : int ={}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
lowerCamelCase_ : Any =PixaStructTextConfig(**snake_case__ )
lowerCamelCase_ : Optional[Any] =PixaStructVisionConfig(**snake_case__ )
lowerCamelCase_ : str =self.text_config.decoder_start_token_id
lowerCamelCase_ : Optional[int] =self.text_config.pad_token_id
lowerCamelCase_ : List[Any] =self.text_config.eos_token_id
lowerCamelCase_ : int =initializer_factor
lowerCamelCase_ : Optional[Any] =initializer_range
lowerCamelCase_ : Any =self.initializer_range
lowerCamelCase_ : List[Any] =self.initializer_range
lowerCamelCase_ : List[str] =is_vqa
@classmethod
def UpperCAmelCase__ ( cls : List[str] , snake_case__ : PixaStructTextConfig , snake_case__ : PixaStructVisionConfig , **snake_case__ : Optional[int] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case__ )
def UpperCAmelCase__ ( self : Any ):
lowerCamelCase_ : Tuple =copy.deepcopy(self.__dict__ )
lowerCamelCase_ : Dict =self.text_config.to_dict()
lowerCamelCase_ : Union[str, Any] =self.vision_config.to_dict()
lowerCamelCase_ : Dict =self.__class__.model_type
return output
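# Usage sketch for the composite config above (from_text_vision_configs is the
# upstream name of the classmethod; in this file "a" stands in for the digit 2
# in class names, and the combined class is the third one defined here):
#
#     cfg = PixaStructConfig.from_text_vision_configs(
#         PixaStructTextConfig(), PixaStructVisionConfig()
#     )
#     cfg.to_dict()  # nests "text_config" and "vision_config" as shown above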
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
A__ : Any = True
except (ImportError, ModuleNotFoundError):
A__ : str = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split text into sentences joined by newlines (used for ROUGE-Lsum scoring)."""
    x = re.sub("<n>", "", x)  # remove the pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
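# Example (requires nltk with the "punkt" tokenizer downloaded above):
#
#     add_newline_to_end_of_each_sentence("Hello world. How are you?")
#     -> "Hello world.\nHow are you?"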
import math


class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n - 1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weights
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores the minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # the distance from a node to itself is zero

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
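    # Hand-checked expected output for the demo above:
    #   show_min(1, 4) -> 11  (1 -> 3 -> 4: 5 + 6)
    #   show_min(0, 3) -> 16  (0 -> 2 -> 3: 9 + 7)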
from collections import deque


def tarjan(g):
    """Return the strongly connected components of a directed graph given as an
    adjacency list, using Tarjan's algorithm."""
    n = len(g)
    stack = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v, index, components):
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n, edges):
    """Build an adjacency list with n vertices from a list of (u, v) edges."""
    g = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g


if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)

    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
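    # Note: Tarjan pops a component when its root finishes, so components come
    # out in reverse topological order of the condensation graph; the sink
    # component [5] is first and the big cycle [3, 2, 1, 0] is last.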
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase : Optional[Any] = logging.get_logger(__name__)
class A ( __snake_case ):
__magic_name__ = ['''pixel_values''']
def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : str = size if size is not None else {'''shortest_edge''': 384}
A : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : str = do_resize
A : List[Any] = size
# Default value set here for backwards compatibility where the value in config is None
A : List[Any] = crop_pct if crop_pct is not None else 224 / 256
A : Optional[int] = resample
A : Union[str, Any] = do_rescale
A : List[str] = rescale_factor
A : Union[str, Any] = do_normalize
A : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
A : str = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
A : Any = size['''shortest_edge''']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
A : Dict = int(shortest_edge / crop_pct )
A : str = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : int = resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
SCREAMING_SNAKE_CASE , size=(shortest_edge, shortest_edge) , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> List[str]:
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> np.ndarray:
"""simple docstring"""
return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image:
"""simple docstring"""
A : int = do_resize if do_resize is not None else self.do_resize
A : Tuple = crop_pct if crop_pct is not None else self.crop_pct
A : Optional[Any] = resample if resample is not None else self.resample
A : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
A : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
A : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
A : List[str] = image_std if image_std is not None else self.image_std
A : Union[str, Any] = size if size is not None else self.size
A : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
A : Any = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('''crop_pct must be specified if size < 384.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
A : Optional[int] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
A : Any = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , crop_pct=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
A : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
A : Dict = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images]
A : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
A : Optional[int] = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
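# Worked example of the crop_pct logic above (pure arithmetic): with
# size={"shortest_edge": 224} and the default crop_pct = 224 / 256 = 0.875, an
# image is first resized so its shortest edge is int(224 / 0.875) = 256, then
# center-cropped to 224 x 224. At shortest_edge >= 384 the image is instead
# warped directly to a (shortest_edge, shortest_edge) square with no crop.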
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : str = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class A ( __snake_case ):
__magic_name__ = '''gpt_neo'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}
def __init__( self , SCREAMING_SNAKE_CASE=50257 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=[[["global", "local"], 12]] , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=50256 , SCREAMING_SNAKE_CASE=50256 , **SCREAMING_SNAKE_CASE , ) -> int:
"""simple docstring"""
A : Union[str, Any] = vocab_size
A : Optional[Any] = max_position_embeddings
A : Dict = hidden_size
A : Optional[Any] = num_layers
A : Tuple = num_heads
A : int = intermediate_size
A : Optional[Any] = window_size
A : List[Any] = activation_function
A : Union[str, Any] = resid_dropout
A : Any = embed_dropout
A : List[Any] = attention_dropout
A : str = classifier_dropout
A : List[Any] = layer_norm_epsilon
A : str = initializer_range
A : List[str] = use_cache
A : Optional[int] = bos_token_id
A : List[Any] = eos_token_id
A : int = attention_types
A : int = self.expand_attention_types_params(SCREAMING_SNAKE_CASE )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.attention_layers)` == `config.num_layers` '''
F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, '
F'`config.num_layers = {self.num_layers}`. '
'''`config.attention_layers` is prepared using `config.attention_types`. '''
'''Please verify the value of `config.attention_types` argument.''' )
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
A : List[str] = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom block-length computation written with torch ops only (no Python
    control flow) so that it can be exported to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class A ( __snake_case ):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
A : Tuple = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE , direction='''inputs''' )
A : Optional[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
A : Dict = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return self._config.num_heads
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = -1 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ) -> Mapping[str, Any]:
"""simple docstring"""
A : List[str] = super(SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , seq_length=SCREAMING_SNAKE_CASE , is_pair=SCREAMING_SNAKE_CASE , framework=SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
A : Any = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
A, A : Dict = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
A : str = seqlen + 2
A : List[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
A : Any = [
(torch.zeros(SCREAMING_SNAKE_CASE ), torch.zeros(SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
A : str = common_inputs['''attention_mask''']
if self.use_past:
A : Optional[int] = ordered_inputs['''attention_mask'''].dtype
A : List[str] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return 13
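# Worked example of the expansion performed by the static method above:
#   [[["global", "local"], 12]] -> ["global", "local", "global", "local", ...]
#   i.e. 2 * 12 = 24 alternating entries, matching the default num_layers=24.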
'''simple docstring'''
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , __snake_case : int , __snake_case : Optional[Any]=None , __snake_case : int=None )-> str:
snake_case = data
snake_case = previous
snake_case = next_node
def __str__( self : Union[str, Any] )-> str:
return f'''{self.data}'''
def lowerCAmelCase ( self : Tuple )-> int:
return self.data
def lowerCAmelCase ( self : str )-> str:
return self.next
def lowerCAmelCase ( self : Dict )-> Optional[int]:
return self.previous
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : int , __snake_case : List[Any] )-> List[str]:
snake_case = head
def __iter__( self : Optional[int] )-> Dict:
return self
def lowerCAmelCase ( self : Optional[Any] )-> List[str]:
if not self.current:
raise StopIteration
else:
snake_case = self.current.get_data()
snake_case = self.current.get_next()
return value
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] )-> str:
snake_case = None # First node in list
snake_case = None # Last node in list
def __str__( self : List[str] )-> Any:
snake_case = self.head
snake_case = []
while current is not None:
nodes.append(current.get_data() )
snake_case = current.get_next()
return " ".join(str(__snake_case ) for node in nodes )
def __contains__( self : Optional[Any] , __snake_case : int )-> Optional[Any]:
snake_case = self.head
while current:
if current.get_data() == value:
return True
snake_case = current.get_next()
return False
def __iter__( self : Dict )-> List[Any]:
return LinkedListIterator(self.head )
def lowerCAmelCase ( self : Tuple )-> int:
if self.head:
return self.head.get_data()
return None
def lowerCAmelCase ( self : Dict )-> Optional[Any]:
if self.tail:
return self.tail.get_data()
return None
def lowerCAmelCase ( self : List[Any] , __snake_case : Node )-> None:
if self.head is None:
snake_case = node
snake_case = node
else:
self.insert_before_node(self.head , __snake_case )
def lowerCAmelCase ( self : int , __snake_case : Node )-> None:
if self.head is None:
self.set_head(__snake_case )
else:
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase ( self : str , __snake_case : int )-> None:
snake_case = Node(__snake_case )
if self.head is None:
self.set_head(__snake_case )
else:
self.set_tail(__snake_case )
def lowerCAmelCase ( self : List[Any] , __snake_case : Node , __snake_case : Node )-> None:
snake_case = node
snake_case = node.previous
if node.get_previous() is None:
snake_case = node_to_insert
else:
snake_case = node_to_insert
snake_case = node_to_insert
def lowerCAmelCase ( self : Optional[int] , __snake_case : Node , __snake_case : Node )-> None:
snake_case = node
snake_case = node.next
if node.get_next() is None:
snake_case = node_to_insert
else:
snake_case = node_to_insert
snake_case = node_to_insert
def lowerCAmelCase ( self : int , __snake_case : int , __snake_case : int )-> None:
snake_case = 1
snake_case = Node(__snake_case )
snake_case = self.head
while node:
if current_position == position:
self.insert_before_node(__snake_case , __snake_case )
return
current_position += 1
snake_case = node.next
self.insert_after_node(self.tail , __snake_case )
def lowerCAmelCase ( self : str , __snake_case : int )-> Node:
snake_case = self.head
while node:
if node.get_data() == item:
return node
snake_case = node.get_next()
raise Exception("""Node not found""" )
def lowerCAmelCase ( self : Any , __snake_case : Dict )-> Tuple:
if (node := self.get_node(__snake_case )) is not None:
if node == self.head:
snake_case = self.head.get_next()
if node == self.tail:
snake_case = self.tail.get_previous()
self.remove_node_pointers(__snake_case )
@staticmethod
def lowerCAmelCase ( __snake_case : Node )-> None:
if node.get_next():
snake_case = node.previous
if node.get_previous():
snake_case = node.next
snake_case = None
snake_case = None
def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
return self.head is None
def __lowerCamelCase ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/vocab.json")
_SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer config
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor config
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_model_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

            self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")


@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

            new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
            # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
            self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
import os

from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home


default_cache_path = HUGGINGFACE_HUB_CACHE

CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
DEPRECATED_REVISION_ARGS = ["fp16", "non-ema"]
TEXT_ENCODER_ATTN_MODULE = ".self_attn"
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
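    # Sanity checks (a sketch, not from the original module): a target that is
    # absent from the graph yields an empty path / a distance of -1.
    print(bfs_shortest_path(demo_graph, "G", "Z"))  # returns []
    print(bfs_shortest_path_distance(demo_graph, "G", "Z"))  # returns -1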
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output class for the scheduler's step function output."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get the "previous" step value (= t+1 in the inverted process)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
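

# Minimal usage sketch (not part of the original module; tensor shapes are
# hypothetical): run the inverse scheduler one step on a dummy model output.
#
#     scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(50)
#     sample = torch.randn(1, 3, 64, 64)
#     noise_pred = torch.randn(1, 3, 64, 64)
#     out = scheduler.step(noise_pred, int(scheduler.timesteps[0]), sample)
#     print(out.prev_sample.shape)  # torch.Size([1, 3, 64, 64])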
"""simple docstring"""
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
return [sentence[i : i + ngram_size] for i in range(len(lowerCAmelCase__ ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
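    # Usage sketch (hypothetical input, not from the original module):
    print(create_ngram("I am a sentence", 2))
    # -> ['I ', ' a', 'am', 'm ', ' a', 'a ', ' s', 'se', 'en', 'nt', 'te', 'en', 'nc', 'ce']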
from __future__ import annotations

from typing import Any


class CircularQueueLinkedList:
    """Circular FIFO queue backed by a doubly linked list of fixed capacity."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
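    # Usage sketch (not part of the original module):
    queue = CircularQueueLinkedList(initial_capacity=2)
    queue.enqueue("a")
    queue.enqueue("b")
    print(queue.dequeue())  # a
    print(queue.dequeue())  # b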
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k):
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
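
# Example invocation (a sketch; the script name and checkpoint path are hypothetical):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc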
"""simple docstring"""
import os
import sys
_lowerCAmelCase : str = os.path.join(os.path.dirname(__file__), """src""")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
_lowerCAmelCase : List[Any] = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *snake_case : List[Any] , **snake_case : Any )-> Dict:
'''simple docstring'''
return AutoConfig.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoTokenizer.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *snake_case : Union[str, Any] , **snake_case : Dict )-> Optional[int]:
'''simple docstring'''
return AutoTokenizer.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModel.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *snake_case : Optional[int] , **snake_case : str )-> Union[str, Any]:
'''simple docstring'''
return AutoModel.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *snake_case : str , **snake_case : str )-> Tuple:
'''simple docstring'''
return AutoModelForCausalLM.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *snake_case : Optional[Any] , **snake_case : Tuple )-> Tuple:
'''simple docstring'''
return AutoModelForMaskedLM.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *snake_case : List[str] , **snake_case : Any )-> Dict:
'''simple docstring'''
return AutoModelForSequenceClassification.from_pretrained(*snake_case , **snake_case )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def SCREAMING_SNAKE_CASE__ ( *snake_case : List[Any] , **snake_case : int )-> str:
'''simple docstring'''
return AutoModelForQuestionAnswering.from_pretrained(*snake_case , **snake_case )
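
# Usage sketch via torch.hub (an assumption: this file is used as a repo-level
# hubconf.py; the repo path below is illustrative, not confirmed by this file):
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")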
"""simple docstring"""
import functools
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> int:
'''simple docstring'''
UpperCAmelCase__ : List[str] = len(snake_case )
UpperCAmelCase__ : str = len(snake_case )
@functools.cache
def min_distance(snake_case : int , snake_case : int ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
UpperCAmelCase__ : Optional[int] = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , snake_case ) , 1 + min_distance(snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
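    # Usage sketch (hypothetical strings, not from the original module):
    print(edit_distance("intention", "execution"))  # 5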
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}


class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
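

# Usage sketch (not part of the original module): build a small config and read
# an attribute resolved through `attribute_map`.
#
#     cfg = CodeGenConfig(n_layer=2, n_head=4, n_embd=256)
#     print(cfg.num_hidden_layers)  # 2, mapped via attribute_map -> n_layer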
from typing import Any


def mode(input_list: list) -> list:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
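    # Usage sketch (hypothetical input, not from the original module):
    print(mode([2, 3, 4, 5, 3, 4]))  # [3, 4]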
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''
_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel encoding method."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
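

# Usage sketch (not part of the original module; shapes and channel counts are
# hypothetical): encode, quantize and decode a dummy image.
#
#     model = VQModel(block_out_channels=(32,), norm_num_groups=32, sample_size=32)
#     x = torch.randn(1, 3, 32, 32)
#     out = model(x)
#     print(out.sample.shape)  # torch.Size([1, 3, 32, 32])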
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def _lowerCamelCase( a ):
__a = torch.exp(a )
__a = torch.sum(a , dim=1 ) # sum of exp(x_i)
__a = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(a ) - B / A
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    """The Bert Model transformer with early exiting (DeeBERT). """,
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message  # the outputs assembled so far
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A highway (early-exit) head: pools one intermediate layer's output and classifies it."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attentions if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the chosen layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
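# Added illustration: the early-exit rule above compares the entropy of a highway
# classifier's logits against a per-layer threshold; a confident (low-entropy)
# intermediate prediction raises HighwayException to stop computation early.
#   model.bert.encoder.set_early_exit_entropy(0.5)   # hypothetical threshold
#   entropy(torch.tensor([[8.0, 0.1, 0.2]])) < 0.5   # True -> that layer would exit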
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
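# Added usage sketch (checkpoint name is an assumption, not taken from this file):
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   batch = processor(audio=waveform, sampling_rate=16_000, text="hello", return_tensors="pt")
#   # -> input_features from the feature extractor plus tokenized "labels"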
"""simple docstring"""
from __future__ import annotations
END = "#"


class Trie:
    """A simple prefix trie used for autocomplete."""

    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power (Project Euler problem 16)."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
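# Added sanity check: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26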
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
def a_ ( self) -> Dict:
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
snake_case_ = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
snake_case_ = []
for i in range(self.batch_size):
snake_case_ = {}
snake_case_ = torch.randint(
high=self.num_labels, size=(self.n_targets,), device=lowerCAmelCase__)
snake_case_ = torch.rand(self.n_targets, 4, device=lowerCAmelCase__)
labels.append(lowerCAmelCase__)
snake_case_ = self.get_config()
return config, pixel_values, labels
def a_ ( self) -> Any:
return YolosConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCAmelCase__, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, )
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Tuple:
snake_case_ = YolosModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
snake_case_ = model(lowerCAmelCase__)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size))
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]:
snake_case_ = YolosForObjectDetection(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
snake_case_ = model(pixel_values=lowerCAmelCase__)
snake_case_ = model(lowerCAmelCase__)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
snake_case_ = model(pixel_values=lowerCAmelCase__, labels=lowerCAmelCase__)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
def a_ ( self) -> List[str]:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
SCREAMING_SNAKE_CASE_ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=False) -> Tuple:
snake_case_ = super()._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__, return_labels=lowerCAmelCase__)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
snake_case_ = []
for i in range(self.model_tester.batch_size):
snake_case_ = {}
snake_case_ = torch.ones(
size=(self.model_tester.n_targets,), device=lowerCAmelCase__, dtype=torch.long)
snake_case_ = torch.ones(
self.model_tester.n_targets, 4, device=lowerCAmelCase__, dtype=torch.float)
labels.append(lowerCAmelCase__)
snake_case_ = labels
return inputs_dict
def a_ ( self) -> str:
snake_case_ = YolosModelTester(self)
snake_case_ = ConfigTester(self, config_class=lowerCAmelCase__, has_text_modality=lowerCAmelCase__, hidden_size=37)
def a_ ( self) -> Optional[Any]:
self.config_tester.run_common_tests()
def a_ ( self) -> int:
# YOLOS does not use inputs_embeds
pass
def a_ ( self) -> Optional[int]:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(lowerCAmelCase__)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__, nn.Linear))
def a_ ( self) -> Any:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(lowerCAmelCase__)
snake_case_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCAmelCase__)
def a_ ( self) -> Any:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def a_ ( self) -> Optional[int]:
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
# in YOLOS, the seq_len is different
snake_case_ = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
snake_case_ = True
snake_case_ = False
snake_case_ = True
snake_case_ = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
snake_case_ = outputs.attentions
self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
snake_case_ = outputs.attentions
self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
snake_case_ = len(lowerCAmelCase__)
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
snake_case_ = 1
self.assertEqual(out_len + added_hidden_states, len(lowerCAmelCase__))
snake_case_ = outputs.attentions
self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def a_ ( self) -> Dict:
def check_hidden_states_output(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__):
snake_case_ = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__))
snake_case_ = outputs.hidden_states
snake_case_ = getattr(
self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(lowerCAmelCase__), lowerCAmelCase__)
# YOLOS has a different seq_length
snake_case_ = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*lowerCAmelCase__)
@slow
def a_ ( self) -> Dict:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = YolosModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
@cached_property
def a_ ( self) -> Union[str, Any]:
return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None
@slow
def a_ ( self) -> Any:
snake_case_ = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(lowerCAmelCase__)
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=lowerCAmelCase__, return_tensors='pt').to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
snake_case_ = model(inputs.pixel_values)
# verify outputs
snake_case_ = torch.Size((1, 100, 92))
self.assertEqual(outputs.logits.shape, lowerCAmelCase__)
snake_case_ = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]], device=lowerCAmelCase__, )
snake_case_ = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], lowerCAmelCase__, atol=1e-4))
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], lowerCAmelCase__, atol=1e-4))
# verify postprocessing
snake_case_ = image_processor.post_process_object_detection(
lowerCAmelCase__, threshold=0.3, target_sizes=[image.size[::-1]])[0]
snake_case_ = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(lowerCAmelCase__)
snake_case_ = [75, 75, 17, 63, 17]
snake_case_ = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(lowerCAmelCase__)
self.assertEqual(len(results['scores']), 5)
self.assertTrue(torch.allclose(results['scores'], lowerCAmelCase__, atol=1e-4))
self.assertSequenceEqual(results['labels'].tolist(), lowerCAmelCase__)
self.assertTrue(torch.allclose(results['boxes'][0, :], lowerCAmelCase__))
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide number_of_bytes into `partitions` contiguous, 1-indexed byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not be greater than number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
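# Added example: 100 bytes over 4 partitions yields contiguous, 1-indexed,
# inclusive ranges, with the last partition absorbing any remainder.
assert allocation_num(100, 4) == ["1-25", "26-50", "51-75", "76-100"]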
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowercase__ = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source-language setting: no prefix, suffix = [eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target-language setting: no prefix, suffix = [eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
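# Added usage sketch (checkpoint and language codes taken from the maps above):
#   tokenizer = MBartTokenizerFast.from_pretrained(
#       "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # input_ids end with [eos, src_lang_code] per set_src_lang_special_tokens above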
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion (exponential time): largest square of 1s ending at each cell."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Memoised recursion: O(rows * cols) time and space."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up table: O(rows * cols) time and space."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up with two rows only: O(rows * cols) time, O(cols) extra space."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        next_row = current_row[:]  # copy, so the row below stays intact for the next iteration
    return largest_square_area
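# Added note: all four variants return the same answer. The plain recursion is
# exponential; the memoised and tabulated versions run in O(rows * cols) time,
# and the last keeps only two rows for O(cols) extra space.
assert largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]) == 2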
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
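# Added usage sketch (checkpoint name is an assumption). The _LazyModule above
# defers importing the torch/tf/flax backends until a symbol is first accessed:
#   from transformers import MT5ForConditionalGeneration, MT5Tokenizer
#   tokenizer = MT5Tokenizer.from_pretrained("google/mt5-small")
#   model = MT5ForConditionalGeneration.from_pretrained("google/mt5-small")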
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict


def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[config.hidden_sizes[i] :, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copy/paste/tweak the original GLPN weights into our GLPN structure."""
    # define GLPN configuration
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
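# Added example invocation (script filename is an assumption; the flags come from
# the argparse definitions below):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --model_name glpn-kitti --pytorch_dump_folder_path ./glpn-kitti-hf --push_to_hub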
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
a : List[Any] = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
'''simple docstring'''
class Node:
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> Any:
"""simple docstring"""
A : Tuple = data
A : Optional[Any] = previous
A : Union[str, Any] = next_node
def __str__( self ) -> str:
"""simple docstring"""
return F'{self.data}'
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return self.data
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
return self.next
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
return self.previous
class LinkedListIterator:
def __init__( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : List[str] = head
def __iter__( self ) -> Any:
"""simple docstring"""
return self
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
if not self.current:
raise StopIteration
else:
A : List[str] = self.current.get_data()
A : Union[str, Any] = self.current.get_next()
return value
class LinkedList:
def __init__( self ) -> Optional[Any]:
"""simple docstring"""
A : int = None # First node in list
A : str = None # Last node in list
def __str__( self ) -> int:
"""simple docstring"""
A : int = self.head
A : Optional[int] = []
while current is not None:
nodes.append(current.get_data() )
A : List[str] = current.get_next()
return " ".join(str(SCREAMING_SNAKE_CASE ) for node in nodes )
def __contains__( self , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
A : str = self.head
while current:
if current.get_data() == value:
return True
A : Optional[int] = current.get_next()
return False
def __iter__( self ) -> int:
"""simple docstring"""
return LinkedListIterator(self.head )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
if self.head:
return self.head.get_data()
return None
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
if self.tail:
return self.tail.get_data()
return None
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
if self.head is None:
A : Any = node
A : List[Any] = node
else:
self.insert_before_node(self.head , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
if self.head is None:
self.set_head(SCREAMING_SNAKE_CASE )
else:
self.insert_after_node(self.tail , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
A : List[Any] = Node(SCREAMING_SNAKE_CASE )
if self.head is None:
self.set_head(SCREAMING_SNAKE_CASE )
else:
self.set_tail(SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
A : Tuple = node
A : int = node.previous
if node.get_previous() is None:
A : int = node_to_insert
else:
A : Tuple = node_to_insert
A : Optional[int] = node_to_insert
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
A : Tuple = node
A : int = node.next
if node.get_next() is None:
A : Optional[int] = node_to_insert
else:
A : Tuple = node_to_insert
A : List[str] = node_to_insert
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
A : int = 1
A : int = Node(SCREAMING_SNAKE_CASE )
A : List[str] = self.head
while node:
if current_position == position:
self.insert_before_node(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return
current_position += 1
A : Any = node.next
self.insert_after_node(self.tail , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Node:
"""simple docstring"""
A : str = self.head
while node:
if node.get_data() == item:
return node
A : int = node.get_next()
raise Exception('''Node not found''' )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
if (node := self.get_node(SCREAMING_SNAKE_CASE )) is not None:
if node == self.head:
A : Optional[Any] = self.head.get_next()
if node == self.tail:
A : Union[str, Any] = self.tail.get_previous()
self.remove_node_pointers(SCREAMING_SNAKE_CASE )
@staticmethod
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
if node.get_next():
A : Union[str, Any] = node.previous
if node.get_previous():
A : Optional[int] = node.next
A : int = None
A : Optional[int] = None
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
return self.head is None
def lowerCAmelCase_ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    """Fast LXMERT tokenizer, backed by the `tokenizers` library and based on WordPiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved state disagrees with the arguments passed in.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
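
# Minimal usage sketch (an assumption for illustration: it requires the
# "unc-nlp/lxmert-base-uncased" checkpoint to be downloadable; the method names
# match the class above):
if __name__ == "__main__":
    tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
    ids = tokenizer.build_inputs_with_special_tokens([7, 8], [9])
    type_ids = tokenizer.create_token_type_ids_from_sequences([7, 8], [9])
    # Expected shape: [CLS] 7 8 [SEP] 9 [SEP] with segment ids [0, 0, 0, 0, 1, 1]
    print(ids, type_ids)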
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
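
# Standalone sketch of the helper the tests above exercise. Exact behavior is
# version-dependent in diffusers; for the version these tests target, a repo is
# "compatible" only when every non-safetensors weight file has a safetensors twin:
if __name__ == "__main__":
    print(is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"]))  # expected False here: no twin
    print(is_safetensors_compatible(
        ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
    ))  # expected True here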
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle(data: list[Any]) -> list[Any]:
    # Swap two randomly chosen positions, once per element, shuffling in place.
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
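

# Note: the textbook Fisher-Yates algorithm swaps each index i with a random
# index j <= i, which guarantees a uniformly random permutation; the variant
# above performs random transpositions instead. A sketch of the classic version:
def fisher_yates_shuffle_uniform(data: list[Any]) -> list[Any]:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data
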
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""
    Wrapper class for multiple `ControlNetModel` instances, so they can be called like a single model
    and their residuals summed together.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # Load controlnets and append to the list until no controlnet directory exists anymore.
        # The first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with
        # `DiffusionPipeline.from_pretrained`; the second, third, ... are saved under
        # `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least"
                f" {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
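
# Minimal usage sketch (the checkpoint names are hypothetical; the wrapped nets
# must share an architecture so their residuals can be summed):
if __name__ == "__main__":
    nets = [ControlNetModel.from_pretrained(p) for p in ("ctrl-canny", "ctrl-depth")]
    multi = MultiControlNetModel(nets)
    multi.save_pretrained("./mydirectory/controlnet")  # second net is written to ./mydirectory/controlnet_1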
"""simple docstring"""
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"


class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"

    @slow
    @require_torch_gpu
    def test_model_download(self):
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            "$MAX_LEN": 64,
            "$BS": 64,
            "$GAS": 1,
            "$ENRO_DIR": self.data_dir,
            "facebook/mbart-large-cc25": MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            "--learning_rate=3e-5": "--learning_rate 3e-4",
            "--num_train_epochs 6": "--num_train_epochs 1",
        }

        # Clean up bash script
        bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip()
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()

        # bash_script = bash_script.replace("--fp16 ", "")
        args = f"""
            --output_dir {output_dir}
            --tokenizer_name Helsinki-NLP/opus-mt-en-ro
            --sortish_sampler
            --do_predict
            --gpus 1
            --freeze_encoder
            --n_train 40000
            --n_val 500
            --n_test 500
            --fp16_opt_level O1
            --num_sanity_val_steps 0
            --eval_beams 2
        """.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ["finetune.py"] + bash_script.split() + args
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
        self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01)
        # model hanging on generate. Maybe bad config was saved.
        self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0)

        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats["val_avg_bleu"], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro"
        env_vars_to_replace = {
            "--fp16_opt_level=O1": "",
            "$MAX_LEN": 128,
            "$BS": 16,
            "$GAS": 1,
            "$ENRO_DIR": data_dir,
            "$m": "sshleifer/student_marian_en_ro_6_1",
            "val_check_interval=0.25": "val_check_interval=1.0",
        }

        # Clean up bash script
        bash_script = (
            (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip()
        )
        bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")
        bash_script = bash_script.replace("--fp16 ", " ")

        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace("--fp16", "")
        epochs = 6
        testargs = (
            ["distillation.py"]
            + bash_script.split()
            + [
                f"--output_dir={output_dir}",
                "--gpus=1",
                "--learning_rate=1e-3",
                f"--num_train_epochs={epochs}",
                "--warmup_steps=10",
                "--val_check_interval=1.0",
                "--do_predict",
            ]
        )
        with patch.object(sys, "argv", testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)

        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics["val"][0]
        last_step_stats = metrics["val"][-1]
        assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # fails if the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)

        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_path = [x for x in contents if x.endswith(".ckpt")][0]
        full_path = os.path.join(args.output_dir, ckpt_path)
        ckpt = torch.load(full_path, map_location="cpu")
        expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight"
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32

        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics["test"]) == 1
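

# Both tests above share one pattern: read a launch script, strip the
# `python <entrypoint>` prefix, substitute placeholders, and hand the result to
# argparse via sys.argv. A standalone sketch of that pattern (the function name
# and placeholder dict are illustrative, not part of the test suite):
def bash_script_to_argv(script_text: str, entrypoint: str, replacements: dict) -> list:
    argv = script_text.split(entrypoint)[1].strip()
    argv = argv.replace("\\\n", "").strip().replace('"$@"', "")
    for key, value in replacements.items():
        argv = argv.replace(key, str(value))
    return [entrypoint] + argv.split()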
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_initialization(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_gradient_checkpointing_backward_compatibility(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
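
# Minimal sketch of the tiny backbone configuration the tester above builds
# (values mirror MaskFormerSwinModelTester's defaults; assumes torch is available):
if __name__ == "__main__":
    cfg = MaskFormerSwinConfig(
        image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4],
        out_features=["stage1", "stage2", "stage3"],
    )
    backbone = MaskFormerSwinBackbone(cfg)
    feats = backbone(torch.randn(1, 3, 32, 32)).feature_maps
    print([tuple(f.shape) for f in feats])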
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
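
# Minimal usage sketch: build a default config and override a single field
# (all argument names match the __init__ signature above):
if __name__ == "__main__":
    config = Data2VecVisionConfig(image_size=384)
    print(config.model_type, config.image_size, config.hidden_size)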
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residuals = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
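
# Example shell invocation, assuming this file is saved as
# convert_dpt_to_pytorch.py (the output path is illustrative):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large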
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = """
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
"""


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
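
# Minimal usage sketch outside the test harness (same tool name as loaded above;
# the question is illustrative):
if __name__ == "__main__":
    qa_tool = load_tool("text-question-answering")
    qa_tool.setup()
    print(qa_tool(TEXT, "When was Hugging Face founded?"))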
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }

        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would
        # always return 0 - set clip_std to 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )

        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )

        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class TextSummarizationTool(PipelineTool):
    """A tool that summarizes an English text with a BART summarization checkpoint."""

    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
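
# Minimal usage sketch (a sketch only: this module lives inside the transformers
# package and downloads the default checkpoint named above on first use):
if __name__ == "__main__":
    summarizer = TextSummarizationTool()
    print(summarizer("Person A: I'll be late tonight. Person B: OK, I'll save you some dinner."))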
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __lowercase ( _A , _A ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : str = []
for part_id in partition_order:
SCREAMING_SNAKE_CASE : Tuple = df.where(F"SPARK_PARTITION_ID() = {part_id}" ).collect()
for row_idx, row in enumerate(_A ):
expected_row_ids_and_row_dicts.append((F"{part_id}_{row_idx}", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __lowercase ( ) -> Tuple:
SCREAMING_SNAKE_CASE : List[str] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
SCREAMING_SNAKE_CASE : str = spark.range(100 ).repartition(1 )
SCREAMING_SNAKE_CASE : str = Spark(_A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __lowercase ( ) -> Tuple:
SCREAMING_SNAKE_CASE : Optional[int] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
SCREAMING_SNAKE_CASE : Tuple = spark.range(10 ).repartition(2 )
SCREAMING_SNAKE_CASE : Any = [1, 0]
SCREAMING_SNAKE_CASE : Dict = _generate_iterable_examples(_A , _A ) # Reverse the partitions.
SCREAMING_SNAKE_CASE : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(_A , _A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowercase ( ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : Union[str, Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
SCREAMING_SNAKE_CASE : List[str] = spark.range(10 ).repartition(1 )
SCREAMING_SNAKE_CASE : Optional[Any] = SparkExamplesIterable(_A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_A ):
assert row_id == F"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle() -> None:
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard() -> None:
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows() -> None:
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 245
| 0
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester:
    '''simple docstring'''

    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=12, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_simple_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_flaubert_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_token_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip( self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@slow
@require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
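# The integration check above can be reproduced standalone (real checkpoint name;
# the base cased FlauBERT model has a 768-dim hidden state):
#
#   model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
#   output = model(torch.tensor([[0, 345, 232]]))[0]  # shape: (1, 3, 768)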
| 36
|
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase : List[Any] = logging.get_logger(__name__)
lowercase : List[Any] = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class ConditionalDetrConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__( self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
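# A minimal usage sketch of the two configs above (values are illustrative):
#
#   config = ConditionalDetrConfig(d_model=256, encoder_attention_heads=8)
#   assert config.hidden_size == 256  # attribute_map aliases hidden_size -> d_model
#   onnx_config = ConditionalDetrOnnxConfig(config)
#   list(onnx_config.inputs)  # ["pixel_values", "pixel_mask"]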
| 36
| 1
|
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
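# Typical use of the public API re-exported above (illustrative sketch; `model`,
# `optimizer` and `dataloader` are assumed to exist):
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       loss = model(**batch).loss
#       accelerator.backward(loss)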
| 340
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        '''simple docstring'''
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        '''simple docstring'''
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores
    def char_decode(self, sequences):
        '''simple docstring'''
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        '''simple docstring'''
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        '''simple docstring'''
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
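# Minimal usage sketch (real checkpoint name; `image` and `model` are assumed to
# exist, and `model(**inputs).logits` is the (char, bpe, wp) logits triple that
# batch_decode above expects):
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   inputs = processor(images=image, return_tensors="pt")
#   text = processor.batch_decode(model(**inputs).logits)["generated_text"]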
| 340
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    '''simple docstring'''
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    '''simple docstring'''
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    '''simple docstring'''
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    '''simple docstring'''
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
)
parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
UpperCAmelCase : Optional[int] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
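# Example invocation (file paths are placeholders, not real files):
#   python convert_s3prl_checkpoint.py --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json --checkpoint_path ./s3prl.ckpt --model_dump_path ./converted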
| 313
|
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    '''simple docstring'''
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
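# Illustrative trace of Kadane's algorithm above on [-2, 1, -3, 4, -1, 2, 1, -5, 4]
# with allow_empty_subarrays=False (not part of the original module):
#   curr_sum: -2 -> 1 -> -2 -> 4 -> 3 -> 5 -> 6 -> 1 -> 5
#   max_sum peaks at 6, the sum of the subarray [4, -1, 2, 1].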
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase : Union[str, Any] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"{max_subarray_sum(nums) = }")
| 313
| 1
|
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    """simple docstring"""
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    """simple docstring"""
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """simple docstring"""
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
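                # ONNX TensorProto dtype codes handled below, per the onnx spec:
                # 1 = FLOAT (4 bytes), 6 = INT32 (4 bytes), 7 = INT64 and 11 = DOUBLE (8 bytes each).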
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , lowerCAmelCase_ )
total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model_path = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model_path)
    return new_model_path
| 90
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self, vocab_file, pad_token="<pad>", eos_token="</s>", unk_token="<unk>", mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs ) -> None:
        """simple docstring"""
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        """simple docstring"""
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        """simple docstring"""
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False) -> int:
        """simple docstring"""
        return 1
    def _special_token_mask(self, seq) -> List[int]:
        """simple docstring"""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask( self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
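# Minimal usage sketch (real checkpoint name; eos_token_id is 1 per the encoder
# dict built in __init__ above):
#
#   tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   ids = tokenizer("Summarize this article.").input_ids
#   assert ids[-1] == tokenizer.eos_token_id  # build_inputs_with_special_tokens appends </s>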
| 233
| 0
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
@require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
@slow
@require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
    def test_small_model_tf(self):
pass
| 362
|
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
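# Example values (exact: sigmoid(0) = 0.5, so the sigmoid-weighted unit maps 0 to 0):
#   sigmoid(np.array([0.0]))  # -> array([0.5])
#   swish(np.array([0.0]))    # -> array([0.])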
if __name__ == "__main__":
import doctest
doctest.testmod()
| 80
| 0
|
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    """simple docstring"""
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
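# Example invocation (checkpoint paths are placeholders):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path ./gpt2/model.ckpt --pytorch_dump_folder_path ./gpt2-pytorch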
| 127
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    """simple docstring"""

    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02 ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")
    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__snake_case , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__snake_case , )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
@slow
def a_ ( self ):
snake_case = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
snake_case = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
snake_case = '''left'''
# use different length sentences to test batching
snake_case = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
snake_case = tokenizer(__snake_case , return_tensors='''tf''' , padding=__snake_case )
snake_case = inputs['''input_ids''']
snake_case = model.generate(input_ids=__snake_case , attention_mask=inputs['''attention_mask'''] , max_new_tokens=1_2 )
snake_case = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
snake_case = model.generate(input_ids=__snake_case , max_new_tokens=1_2 )
snake_case = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
snake_case = model.generate(input_ids=__snake_case , max_new_tokens=1_2 )
snake_case = tokenizer.batch_decode(__snake_case , skip_special_tokens=__snake_case )
snake_case = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__snake_case )
snake_case = tokenizer.decode(output_padded[0] , skip_special_tokens=__snake_case )
snake_case = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(__snake_case , __snake_case )
self.assertListEqual(__snake_case , [non_padded_sentence, padded_sentence] )
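
# Standalone sketch of the left-padding behaviour exercised above
# (illustrative only, not part of the original test file; assumes the
# `transformers` TF generation API used by the tests):
#
#   tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   tokenizer.padding_side = "left"   # decoder-only models must be padded on the left
#   inputs = tokenizer(["short", "a much longer prompt"], return_tensors="tf", padding=True)
#   model.generate(**inputs, max_new_tokens=12)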
| 127
| 1
|
from random import shuffle

# NOTE: this script is written against the TensorFlow 1.x graph API
# (sessions and placeholders); on a TensorFlow 2.x install it runs
# through the compatibility layer below.
import tensorflow.compat.v1 as tf
from numpy import array

tf.disable_v2_behavior()


def tf_k_means_cluster(vectors, noofclusters):
    """
    K-Means Clustering using TensorFlow.
    `vectors` should be an n*k 2-D NumPy array, where n is the number
    of vectors of dimensionality k.
    `noofclusters` should be an integer.
    """
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for _ in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
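

# Minimal usage sketch (an assumption, not part of the original script):
# cluster 20 random 2-D points into 3 groups. The data and the cluster
# count are chosen purely for demonstration; the session-based loop above
# is slow by design, so keep the inputs small.
if __name__ == "__main__":
    import numpy as np

    points = np.random.rand(20, 2).astype("float64")
    centroids, assignments = tf_k_means_cluster(points, 3)
    print("centroids:", centroids)
    print("assignments:", assignments)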
| 362
|
import collections.abc
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks).
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
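

# Behaviour sketch (illustrative, not part of the original module): with
# training=False the input passes through unchanged, while in training mode
# roughly a `drop_prob` fraction of the samples are zeroed and the survivors
# are rescaled by 1 / keep_prob so the expected activation is preserved:
#
#   x = torch.ones(8, 3, 4, 4)
#   torch.equal(drop_path(x, drop_prob=0.5, training=False), x)   # True
#   drop_path(x, drop_prob=0.5, training=True)                    # zeros ~half the samples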
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[float] = None ) -> None:
super().__init__()
lowerCAmelCase__ = drop_prob
def a ( self : str , SCREAMING_SNAKE_CASE__ : torch.Tensor ) -> torch.Tensor:
return drop_path(SCREAMING_SNAKE_CASE__ , self.drop_prob , self.training )
def a ( self : Optional[Any] ) -> str:
return "p={}".format(self.drop_prob )
class PoolFormerEmbeddings(nn.Module):
    """
    Construct patch embeddings.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """
    Group Normalization with 1 group. Input: tensor of shape [batch_size, num_channels, height, width].
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)


class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to one PoolFormer block: token mixing by pooling followed by a channel MLP."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))

        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""


@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output


@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
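

# Usage sketch (illustrative only, not part of this module; assumes the
# `transformers` package and the `sail/poolformer_s12` checkpoint named in
# the docstring constants above):
#
#   from transformers import AutoImageProcessor, PoolFormerForImageClassification
#   from PIL import Image
#
#   image = Image.open("cats.png")
#   processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#   model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])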
| 221
| 0
|
import json
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor


@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
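
# Usage sketch for the processor under test (illustrative only; the names
# are taken from the tests above, the example itself is an assumption):
#
#   processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   batch = processor(text="lower newer", images=image, return_tensors="np")
#   # -> keys: input_ids, token_type_ids, attention_mask, pixel_values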
| 147
|
import gc
import unittest

import numpy as np
import torch

from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        # NOTE: restored as vqvae=None (the first pipeline has no VAE); the
        # original line here was corrupted.
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
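
# End-to-end sketch outside the test harness (illustrative only; assumes
# the `teticio/audio-diffusion-ddim-256` checkpoint used above and a GPU):
#
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256").to("cuda")
#   output = pipe(generator=torch.Generator(device="cuda").manual_seed(42))
#   output.images[0].save("mel.png")   # mel spectrogram rendered as an image
#   audio = output.audios[0]           # waveform reconstructed from the spectrogram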
| 147
| 1
|
def is_even(number: int) -> bool:
    """
    Return True if the given integer is even, False otherwise.

    >>> is_even(4)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 351
|
def multiplicative_persistence(num: int) -> int:
    """
    Return the number of steps needed to reduce ``num`` to a single digit
    by repeatedly multiplying its digits together.
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Return the number of steps needed to reduce ``num`` to a single digit
    by repeatedly summing its digits.
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
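
# Worked example (added for illustration, not in the original file):
# 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4, so multiplicative_persistence(39) == 3;
# 39 -> 3+9 = 12 -> 1+2 = 3, so additive_persistence(39) == 2.
if __name__ == "__main__":
    print(multiplicative_persistence(39))  # 3
    print(additive_persistence(39))  # 2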
| 174
| 0
|
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
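
# Export sketch (illustrative only; assumes the `transformers.onnx` config
# tooling used by this class, nothing here is part of the original file):
#
#   from transformers import LevitConfig
#   config = LevitConfig()
#   onnx_config = LevitOnnxConfig(config)
#   list(onnx_config.inputs.keys())    # ["pixel_values"]
#   onnx_config.atol_for_validation    # 1e-4 tolerance used when validating the export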
| 119
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=np.float32)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
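
# Usage sketch (illustrative only; assumes the `diffusers` pipeline API and
# a Stable Diffusion checkpoint, neither of which is part of this module):
#
#   from diffusers import StableDiffusionPipeline, KDPM2DiscreteScheduler
#
#   pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   # swap in this 2nd-order scheduler while keeping the trained noise schedule
#   pipe.scheduler = KDPM2DiscreteScheduler.from_config(pipe.scheduler.config)
#   image = pipe("an astronaut riding a horse", num_inference_steps=25).images[0]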
| 119
| 1
|
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to the given level.
    """

    def brightness(c: int) -> float:
        """
        Fundamental transformation/operation that changes the brightness of a pixel value.
        """
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 216
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
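
# Usage sketch (illustrative only, not part of this configuration module):
#
#   from transformers import Speech2Text2Config, Speech2Text2ForCausalLM
#
#   config = Speech2Text2Config()               # all defaults shown above
#   model = Speech2Text2ForCausalLM(config)     # randomly initialised decoder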
| 216
| 1
|
def add(first: int, second: int) -> int:
    """
    Add two non-negative integers using only bitwise operations. Note that
    Python's unbounded integers mean some negative inputs would never
    terminate, so this implementation assumes non-negative operands.

    >>> add(3, 5)
    8
    """
    while second != 0:
        # carry contains the common set bits of first and second
        carry = first & second
        # sum of bits where at least one operand has a 0 bit
        first ^= second
        # shift the carry left so adding it to first yields the required sum
        second = carry << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 83
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowercase__ ( unittest.TestCase ):
def __init__( self : List[str] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[str]=13 ,lowerCamelCase__ : Dict=7 ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Dict=99 ,lowerCamelCase__ : int=32 ,lowerCamelCase__ : Tuple=5 ,lowerCamelCase__ : Dict=4 ,lowerCamelCase__ : Any=37 ,lowerCamelCase__ : str="gelu" ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : Optional[Any]=0.1 ,lowerCamelCase__ : Optional[Any]=512 ,lowerCamelCase__ : Any=16 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : int=0.0_2 ,lowerCamelCase__ : int=4 ,):
'''simple docstring'''
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : Union[str, Any] = seq_length
_UpperCamelCase : Optional[Any] = is_training
_UpperCamelCase : Optional[int] = use_attention_mask
_UpperCamelCase : Any = use_token_type_ids
_UpperCamelCase : str = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Dict = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Any = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : List[str] = max_position_embeddings
_UpperCamelCase : Optional[int] = type_vocab_size
_UpperCamelCase : str = type_sequence_label_size
_UpperCamelCase : Dict = initializer_range
_UpperCamelCase : List[Any] = num_choices
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_UpperCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_UpperCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=lowerCamelCase__ ,)
return config, input_ids, attention_mask
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[str] = self.prepare_config_and_inputs()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[Any] = config_and_inputs
_UpperCamelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowercase__ ( lowercase , unittest.TestCase ):
lowercase__ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[str] = FlaxDistilBertModelTester(self )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase : Dict = model_class_name.from_pretrained('distilbert-base-uncased' )
_UpperCamelCase : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
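
# Note (added for illustration, not part of the original test file): these
# unittest cases are normally collected by pytest. Assuming the standard
# transformers repository layout, something like
#
#     RUN_SLOW=1 python -m pytest tests/models/distilbert/test_modeling_flax_distilbert.py -v
#
# runs them, with RUN_SLOW=1 enabling the @slow integration checks.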
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/xprophetnet-large-wiki100-cased': (
        'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
    ),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = 'xlm-prophetnet'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'num_encoder_attention_heads',
    }
    def __init__(self, activation_dropout: Optional[float] = 0.1, activation_function: Optional[Union[str, Callable]] = "gelu", vocab_size: Optional[int] = 30522, hidden_size: Optional[int] = 1024, encoder_ffn_dim: Optional[int] = 4096, num_encoder_layers: Optional[int] = 12, num_encoder_attention_heads: Optional[int] = 16, decoder_ffn_dim: Optional[int] = 4096, num_decoder_layers: Optional[int] = 12, num_decoder_attention_heads: Optional[int] = 16, attention_dropout: Optional[float] = 0.1, dropout: Optional[float] = 0.1, max_position_embeddings: Optional[int] = 512, init_std: Optional[float] = 0.02, is_encoder_decoder: Optional[bool] = True, add_cross_attention: Optional[bool] = True, decoder_start_token_id: Optional[int] = 0, ngram: Optional[int] = 2, num_buckets: Optional[int] = 32, relative_max_distance: Optional[int] = 128, disable_ngram_loss: Optional[bool] = False, eps: Optional[float] = 0.0, use_cache: Optional[bool] = True, pad_token_id: Optional[int] = 0, bos_token_id: Optional[int] = 1, eos_token_id: Optional[int] = 2, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs, )
    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`.")
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFDistilBertModel,
            'fill-mask': TFDistilBertForMaskedLM,
            'question-answering': TFDistilBertForQuestionAnswering,
            'text-classification': TFDistilBertForSequenceClassification,
            'token-classification': TFDistilBertForTokenClassification,
            'zero-shot': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(self, replacement: str = "▁", add_prefix_space: bool = True, unk_token: Union[str, AddedToken] = "<unk>", eos_token: Union[str, AddedToken] = "</s>", pad_token: Union[str, AddedToken] = "<pad>"):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ])
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ])
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}", special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()

    def train_from_iterator(self, iterator: Union[Iterator[str], Iterator[Iterator[str]]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress, )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
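
# Usage sketch (added for illustration; "corpus.txt" is a hypothetical local
# text file, not something shipped with this module):
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train("corpus.txt", vocab_size=8000)
#     print(tokenizer.encode("Hello, world!").tokens)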
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
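
# Minimal demonstration (added for illustration, not part of the original
# script): any object with a `process(sample) -> float` method satisfies the
# FilterType protocol, so a trivial pass-through filter is enough to exercise
# the plotting helpers. Its impulse response is an impulse, so the gain plot
# should be flat at 0 dB.
if __name__ == "__main__":
    class PassThroughFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(PassThroughFilter(), samplerate=48000)
    show_phase_response(PassThroughFilter(), samplerate=48000)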
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ), )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileViTModel,
            'image-classification': MobileViTForImageClassification,
            'image-segmentation': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def lowercase_ (self : Dict ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def lowercase_ (self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def lowercase_ (self : List[str] ) -> Any:
"""simple docstring"""
pass
def lowercase_ (self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ = model_class(__UpperCAmelCase )
UpperCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ = [*signature.parameters.keys()]
UpperCAmelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase_ (self : str ) -> int:
"""simple docstring"""
pass
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Implementation of pigeonhole sort; returns the list sorted in ascending order."""
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n')
    unsorted = [int(x) for x in user_input.split(',')]
    print(pigeon_sort(unsorted))
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )

    def read(self):
        # Build iterable (streaming) dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'num_proc {num_proc} must be an integer > 0.')
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = 'utf-8'
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        _ = self.to_json_kwargs.pop('path_or_buf', None)
        orient = self.to_json_kwargs.pop('orient', 'records')
        lines = self.to_json_kwargs.pop('lines', True if orient == 'records' else False)
        index = self.to_json_kwargs.pop('index', False if orient in ['split', 'table'] else True)
        compression = self.to_json_kwargs.pop('compression', None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'`datasets` currently does not support {compression} compression')
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, 'wb', compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    ' was passed. Please provide a local path instead.')
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith('\n'):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle."""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format', ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format', ):
                    written += file_obj.write(json_str)
        return written
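
# Usage sketch (added for illustration; the file names are hypothetical).
# Inside the `datasets` library these classes back `load_dataset("json", ...)`
# and `Dataset.to_json`, but they can also be exercised directly:
#
#     ds = JsonDatasetReader("data.jsonl", split=NamedSplit("train")).read()
#     JsonDatasetWriter(ds, "out.jsonl", num_proc=2).write()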
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode='symmetric', data_format=data_format)
    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
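
# Usage sketch (added for illustration; the input array is synthetic and the
# exact padded shape depends on pad_size):
#
#     import numpy as np
#     processor = Swin2SRImageProcessor()
#     image = np.random.randint(0, 256, (3, 250, 300), dtype=np.uint8)
#     batch = processor(image, return_tensors="np")
#     batch["pixel_values"].shape  # height/width padded up to multiples of pad_size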
from math import isclose, sqrt
def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    """
    Given that a beam strikes the ellipse 4x^2 + y^2 = 100 at (point_x, point_y)
    with slope `incoming_gradient`, return the next point of contact and the
    outgoing slope after reflection.
    """
    # normal_gradient = gradient of the normal at the point of incidence
    normal_gradient = point_y / 4 / point_x
    sa = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    ca = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaeneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100
    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)
    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """
    Count how many times the beam hits the inner surface of the ellipse before
    exiting through the gap at the top (-0.01 <= x <= 0.01, y > 0).
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1
    return num_reflections
if __name__ == "__main__":
print(f'{solution() = }')
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'''If set, {key} must be yes or no.''')
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    """Decorator that skips a test unconditionally."""
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    """Decorator marking a test as slow; slow tests are skipped unless RUN_SLOW=1."""
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite")(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), F'''test requires torch version >= {version}''')(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available, "test requires at least one tracker to be available and for `comet_ml` to not be installed", )(test_case)
class TempDirTestCase(unittest.TestCase):
    """A TestCase that keeps one temporary directory for the whole class and wipes its contents on each setUp."""

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    """A TestCase that resets the accelerator state singletons after every test."""

    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    """A TestCase that registers mocks to be started for every test and stopped on cleanup."""

    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            F'''The combined stderr from workers follows:\n{stderr}''')
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F'''Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}''') from e
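
# Usage sketch (added for illustration; the test class and command are
# hypothetical):
#
#     class MyAcceleratedTests(AccelerateTestCase):
#         @require_cuda
#         def test_runs_on_gpu(self):
#             ...  # AcceleratorState is reset automatically in tearDown
#
#     stdout = run_command(["python", "-c", "print('hello')"], return_stdout=True)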
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """SCUT-DLVCLab/lilt-roberta-en-base""": (
        """https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = '''lilt'''

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, channel_shrink_ratio=4, max_2d_position_embeddings=1024, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = '''vit_msn'''

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
    'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
    'xlm-roberta-large-finetuned-conll02-dutch': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll02-spanish': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-english': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-german': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
    ),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = '''xlm-roberta'''

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
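# For any task other than multiple choice, the `inputs` property above resolves
# to the following mapping (derived directly from the code, not captured from a
# real export run):
# OrderedDict([
#     ("input_ids", {0: "batch", 1: "sequence"}),
#     ("attention_mask", {0: "batch", 1: "sequence"}),
# ])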
| 81
|
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}


def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Search Google Images for ``query`` and download up to ``max_images`` results."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg")
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(F'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
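# Usage note (illustrative; the module filename below is an assumption, not part
# of the original script): run it with a search term on the command line, e.g.
#   python download_images_from_google_query.py "blue flowers"
# Images are saved under ./query_blue_flowers/original_size_img_<n>.jpg. Google
# changes its result markup regularly, so the scraping regexes above should be
# treated as fragile.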
| 81
| 1
|
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
__UpperCamelCase : List[str] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
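# Illustrative note (a sketch, not in the original script): with every rule line
# disabled and DataRow("", "|", "|") for both header and data rows, tabulate
# emits a bare pipe-separated table that reads cleanly inside a Slack code
# block, e.g.
#   tabulate([["tests/test_foo.py", 2]], headers=["Test Location", "Num Failed"],
#            tablefmt=hf_table_format, stralign="right")
# renders roughly as "tests/test_foo.py | 2 |" under a matching header row
# (the file name here is hypothetical).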
__UpperCamelCase : Tuple = []
__UpperCamelCase : List[str] = []
__UpperCamelCase : int = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
__UpperCamelCase : str = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": f'''🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results''',
"""emoji""": True,
},
}
]
__UpperCamelCase : Tuple = 0
for log in Path().glob("""*.log"""):
__UpperCamelCase : Any = 0
with open(log, """r""") as f:
for line in f:
__UpperCamelCase : Optional[int] = json.loads(line)
if line.get("""nodeid""", """""") != "":
__UpperCamelCase : Dict = line["""nodeid"""]
if line.get("""duration""", None) is not None:
__UpperCamelCase : Union[str, Any] = f'''{line['duration']:.4f}'''
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
__UpperCamelCase : Tuple = []
log.unlink()
__UpperCamelCase : List[str] = """"""
__UpperCamelCase : List[Any] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
__UpperCamelCase : Any = []
__UpperCamelCase : Any = {}
for test in failed_tests:
__UpperCamelCase : Any = test[0].split("""::""")
__UpperCamelCase : List[Any] = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
__UpperCamelCase : Dict = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
__UpperCamelCase : str = [test[0] for test in failed_table]
__UpperCamelCase : str = list(set(files))
# Count number of instances in failed_tests
__UpperCamelCase : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
__UpperCamelCase : Any = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
__UpperCamelCase : str = """Too many failed tests, please see the full report in the Action results."""
__UpperCamelCase : Optional[Any] = len(err) + 10
__UpperCamelCase : int = message[: 3000 - offset] + f'''\n...\n```\n{err}'''
print(f'''### {message}''')
else:
__UpperCamelCase : List[str] = """No failed tests! 🤗"""
print(f'''## {message}''')
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
__UpperCamelCase : int = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
__UpperCamelCase : Dict = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
__UpperCamelCase : int = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": f'''https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
payload.append(action_button)
__UpperCamelCase : Dict = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": f'''Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}''',
}
],
}
payload.append(date_report)
__UpperCamelCase : str = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
__UpperCamelCase : int = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__UpperCamelCase : Optional[Any] = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
__UpperCamelCase : Dict = row[0]
else:
__UpperCamelCase : Dict = """"""
__UpperCamelCase : List[Any] = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```''',
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
| 307
|
def xnor_gate(input_1: int, input_2: int) -> int:
    """Return 1 when both inputs are equal (logical XNOR), else 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """Check the full XNOR truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 307
| 1
|
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Evaluate the normal probability density with mean ``mu`` and standard deviation ``sigma`` at ``x``."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
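# Sanity check (a sketch, not in the original file): at the mean of the
# standard normal the density is 1 / sqrt(2 * pi) ~= 0.3989:
# >>> round(float(gaussian(0)), 4)
# 0.3989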
if __name__ == "__main__":
import doctest
doctest.testmod()
| 86
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _UpperCAmelCase :
def __init__( self : Dict , _lowercase : int , _lowercase : List[str]=13 , _lowercase : Dict=32 , _lowercase : Any=2 , _lowercase : Optional[int]=3 , _lowercase : Optional[Any]=16 , _lowercase : Optional[int]=[1, 2, 1] , _lowercase : int=[2, 2, 4] , _lowercase : Optional[Any]=2 , _lowercase : Union[str, Any]=2.0 , _lowercase : Any=True , _lowercase : Optional[Any]=0.0 , _lowercase : Dict=0.0 , _lowercase : Dict=0.1 , _lowercase : str="gelu" , _lowercase : List[Any]=False , _lowercase : List[Any]=True , _lowercase : Optional[Any]=0.02 , _lowercase : str=1E-5 , _lowercase : str=True , _lowercase : Any=None , _lowercase : Tuple=True , _lowercase : Any=10 , _lowercase : int=8 , _lowercase : Optional[Any]=["stage1", "stage2", "stage3"] , _lowercase : Optional[Any]=[1, 2, 3] , ):
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = image_size
__UpperCAmelCase = patch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = embed_dim
__UpperCAmelCase = depths
__UpperCAmelCase = num_heads
__UpperCAmelCase = window_size
__UpperCAmelCase = mlp_ratio
__UpperCAmelCase = qkv_bias
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = drop_path_rate
__UpperCAmelCase = hidden_act
__UpperCAmelCase = use_absolute_embeddings
__UpperCAmelCase = patch_norm
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = initializer_range
__UpperCAmelCase = is_training
__UpperCAmelCase = scope
__UpperCAmelCase = use_labels
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = encoder_stride
__UpperCAmelCase = out_features
__UpperCAmelCase = out_indices
def a ( self : int ):
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def a ( self : Dict ):
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def a ( self : List[Any] , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : int ):
__UpperCAmelCase = MaskFormerSwinModel(config=_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = model(_lowercase )
__UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def a ( self : int , _lowercase : Optional[Any] , _lowercase : Any , _lowercase : Dict ):
__UpperCAmelCase = MaskFormerSwinBackbone(config=_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = model(_lowercase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_lowercase ):
__UpperCAmelCase = ['''stem''']
__UpperCAmelCase = MaskFormerSwinBackbone(config=_lowercase )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs
__UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
a__ : List[Any] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
a__ : Optional[int] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
a__ : List[str] = False
a__ : int = False
a__ : str = False
a__ : str = False
a__ : Any = False
def a ( self : Optional[Any] ):
__UpperCAmelCase = MaskFormerSwinModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=_lowercase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
def a ( self : int ):
pass
def a ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a ( self : str ):
return
def a ( self : Optional[Any] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def a ( self : Optional[int] ):
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_lowercase )
@unittest.skip('''Swin does not use inputs_embeds''' )
def a ( self : List[Any] ):
pass
@unittest.skip('''Swin does not support feedforward chunking''' )
def a ( self : str ):
pass
def a ( self : Union[str, Any] ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def a ( self : Union[str, Any] ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_lowercase )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
def a ( self : Optional[Any] ):
pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
def a ( self : Optional[Any] ):
pass
def a ( self : List[Any] , _lowercase : Union[str, Any] , _lowercase : List[str] , _lowercase : Dict , _lowercase : Tuple ):
__UpperCAmelCase = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(_lowercase , _lowercase ) )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
# Swin has a different seq_length
__UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def a ( self : str ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__UpperCAmelCase = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = 3
__UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__UpperCAmelCase = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
def a ( self : Any ):
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def a ( self : str ):
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def a ( self : Tuple ):
pass
def a ( self : Tuple ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(_lowercase : List[str] ):
__UpperCAmelCase = 0
return t
def check_equivalence(_lowercase : List[Any] , _lowercase : Any , _lowercase : str , _lowercase : List[str]={} ):
with torch.no_grad():
__UpperCAmelCase = model(**_lowercase , return_dict=_lowercase , **_lowercase )
__UpperCAmelCase = model(**_lowercase , return_dict=_lowercase , **_lowercase ).to_tuple()
def recursive_check(_lowercase : Dict , _lowercase : Optional[Any] ):
if isinstance(_lowercase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_lowercase , _lowercase ):
recursive_check(_lowercase , _lowercase )
elif isinstance(_lowercase , _lowercase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_lowercase , _lowercase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_lowercase ) , set_nan_tensor_to_zero(_lowercase ) , atol=1E-5 ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}. Dict has'''
F''' `nan`: {torch.isnan(_lowercase ).any()} and `inf`: {torch.isinf(_lowercase )}.'''
) , )
recursive_check(_lowercase , _lowercase )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(_lowercase )
model.to(_lowercase )
model.eval()
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {'''output_hidden_states''': True} )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
__UpperCAmelCase = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
check_equivalence(_lowercase , _lowercase , _lowercase , {'''output_hidden_states''': True} )
@require_torch
class _UpperCAmelCase ( unittest.TestCase , _lowerCAmelCase ):
a__ : Optional[Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
a__ : List[str] = MaskFormerSwinConfig
def a ( self : List[str] ):
__UpperCAmelCase = MaskFormerSwinModelTester(self )
def a ( self : List[Any] ):
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = inputs_dict['''pixel_values'''].shape[0]
for backbone_class in self.all_model_classes:
__UpperCAmelCase = backbone_class(_lowercase )
backbone.to(_lowercase )
backbone.eval()
__UpperCAmelCase = backbone(**_lowercase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _lowercase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__UpperCAmelCase = backbone(**_lowercase , output_hidden_states=_lowercase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__UpperCAmelCase = backbone(**_lowercase , output_attentions=_lowercase )
self.assertIsNotNone(outputs.attentions )
| 86
| 1
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __lowerCamelCase ( A__ ) -> List[str]:
"""simple docstring"""
UpperCamelCase = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
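# For example (derived from the template strings above; an illustrative comment,
# not captured output), idx == 0 contributes rename pairs such as:
#   ("cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
#    "stage0.patch_embed.proj.weight")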
def __lowerCamelCase ( A__ , A__ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def __lowerCamelCase ( A__ ) -> Any:
"""simple docstring"""
UpperCamelCase = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def __lowerCamelCase ( A__ , A__ , A__ , A__ ) -> Any:
"""simple docstring"""
UpperCamelCase = 'imagenet-1k-id2label.json'
UpperCamelCase = 1_000
UpperCamelCase = 'huggingface/label-files'
UpperCamelCase = num_labels
UpperCamelCase = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type='dataset' ) ) , 'r' ) )
UpperCamelCase = {int(A__ ): v for k, v in idalabel.items()}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
    UpperCamelCase = CvtConfig(num_labels=A__ , id2label=A__ , label2id=A__ )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/' , 1 )[-1][4:6] == "13":
UpperCamelCase = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/' , 1 )[-1][4:6] == "21":
UpperCamelCase = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
UpperCamelCase = [2, 2, 20]
UpperCamelCase = [3, 12, 16]
UpperCamelCase = [192, 768, 1_024]
UpperCamelCase = CvtForImageClassification(A__ )
UpperCamelCase = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
UpperCamelCase = image_size
UpperCamelCase = torch.load(A__ , map_location=torch.device('cpu' ) )
UpperCamelCase = OrderedDict()
UpperCamelCase = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
UpperCamelCase = list_of_state_dict + cls_token(A__ )
UpperCamelCase = list_of_state_dict + embeddings(A__ )
for cnt in range(config.depth[idx] ):
UpperCamelCase = list_of_state_dict + attention(A__ , A__ )
UpperCamelCase = list_of_state_dict + final()
for gg in list_of_state_dict:
print(A__ )
for i in range(len(A__ ) ):
UpperCamelCase = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_lowerCamelCase : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=384,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_lowerCamelCase : Any = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 28
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the normalized escape step of the complex point x + y*i."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence is guaranteed once the absolute value exceeds 2,
        # i.e. once a * a + b * b > 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
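# A quick worked example (a sketch, not in the original file): the origin never
# escapes, so the loop runs to completion, step == max_step - 1, and the
# normalized distance is exactly 1.0 -- the point is inside the Mandelbrot set:
# >>> get_distance(0.0, 0.0, 50)
# 1.0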
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the set, white for everything else."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black for points inside the set, a hue keyed to the escape distance otherwise."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 122
| 0
|
"""simple docstring"""
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of ``n`` via trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
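# Worked example (a sketch, not in the original file): 13195 = 5 * 7 * 13 * 29,
# so trial division strips the factors 5, 7 and 13, leaving n == 29 > 1, which
# becomes the answer:
# >>> solution(13_195)
# 29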
if __name__ == "__main__":
print(f'{solution() = }')
| 209
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """Given exactly one of voltage, current, power as 0, solve P = V * I for it."""
    result = namedtuple("result", "name value")
    if (voltage, current, power).count(0) != 1:
        raise ValueError("Only one argument must be 0")
    elif power < 0:
        raise ValueError(
            "Power cannot be negative in any electrical/electronics system")
    elif voltage == 0:
        return result("voltage", power / current)
    elif current == 0:
        return result("current", power / voltage)
    elif power == 0:
        return result("power", float(round(abs(voltage * current), 2)))
    else:
        raise ValueError("Exactly one argument must be 0")
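# Usage sketch (not in the original file): pass exactly one argument as 0 and
# the function solves P = V * I for the missing quantity, e.g.
# >>> electric_power(voltage=0, current=2, power=5)
# result(name='voltage', value=2.5)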
if __name__ == "__main__":
import doctest
doctest.testmod()
| 209
| 1
|