| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 or 1) |
from argparse import ArgumentParser, Namespace

from ..utils import logging
from . import BaseTransformersCLICommand


def convert_command_factory(args: Namespace):
    """Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""


class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to convert model checkpoints from the original authors to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name

    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
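The subcommand above is normally reached through `transformers-cli`; a minimal sketch of driving it programmatically instead (all paths are hypothetical placeholders, and TensorFlow must be installed):

from transformers.commands.convert import ConvertCommand

cmd = ConvertCommand(
    "bert",                # model_type
    "./bert_model.ckpt",   # tf_checkpoint (placeholder path)
    "./pytorch_dump",      # pytorch_dump_output
    "./bert_config.json",  # config (placeholder path)
    None,                  # finetuning_task_name
)
cmd.run()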
class Graph:
    def __init__(self):
        # adjacency "list": maps each vertex to the list of its neighbours
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int):
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list):
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
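For very deep graphs the recursive helper above can exhaust Python's call stack; a sketch of an equivalent traversal with an explicit stack (function name chosen here for illustration, not part of the class above):

def dfs_iterative(graph: dict) -> None:
    # illustrative alternative to Graph.dfs using an explicit stack
    visited = [False] * len(graph)
    for start in range(len(graph)):
        if visited[start]:
            continue
        stack = [start]
        while stack:
            vertex = stack.pop()
            if visited[vertex]:
                continue
            visited[vertex] = True
            print(vertex, end=" ")
            # push neighbours in reverse so the left-most one is visited first
            stack.extend(reversed(graph.get(vertex, [])))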
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"wmt19-ru-en": {"length_penalty": 1.1},
"wmt19-en-ru": {"length_penalty": 1.1_5},
"wmt19-en-de": {"length_penalty": 1.0},
"wmt19-de-en": {"length_penalty": 1.1},
# allenai:
"wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
"wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
"wmt16-en-de-12-1": {"length_penalty": 0.8},
"wmt19-de-en-6-6-base": {"length_penalty": 0.6},
"wmt19-de-en-6-6-big": {"length_penalty": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_a : Dict= "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
_a : List[Any]= "allenai"
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add the word-ending symbol where the word is not broken up,
    # e.g.: d = {"le@@": 5, "tt@@": 6, "er": 7} => {"le": 5, "tt": 6, "er</w>": 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
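# Illustrative check of the special-token handling above (ids made up for the example):
#   rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7})
#   == {"le": 5, "er</w>": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The specials would otherwise come out as "<s></w>" etc., so they are deleted
# and re-inserted unchanged; note all four specials must be present in the input.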
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fsmt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
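Once converted, the dump folder loads back as a regular Transformers model; a minimal sketch (the folder path is a placeholder for whatever was passed as --pytorch_dump_folder_path):

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

model = FSMTForConditionalGeneration.from_pretrained("./converted-wmt19-ru-en")  # placeholder path
tokenizer = FSMTTokenizer.from_pretrained("./converted-wmt19-ru-en")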
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
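The integration test above doubles as a usage recipe; a minimal inference sketch outside the test harness (assumes a CUDA device, since the pipeline's gumbel-softmax sampling expects a GPU generator):

import torch
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq").to("cuda")
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe("teddy bear playing in the pool", generator=generator).images[0]  # PIL image by default
image.save("teddy_bear.png")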
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
import collections
import importlib.util
import os
import re
from pathlib import Path
lowercase = "src/transformers"
# Matches is_xxx_available()
lowercase = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
lowercase = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
lowercase = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
lowercase = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
lowercase = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
lowercase = re.compile(r"^\s*try:")
# Catches a line with else:
lowercase = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the _import_structure objects defined and the TYPE_CHECKING objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if at least one does not define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"

SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility

MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    """Compare the running version of Transformers to a required minimum and raise if it is too old."""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers."
        )
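Example scripts call this guard right after their imports; a minimal sketch (the version string is illustrative):

from transformers.utils import check_min_version

check_min_version("4.21.0")  # raises ImportError if the installed version is older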
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
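# From the shell this is exposed as the `accelerate test` subcommand; an
# illustrative invocation (the config path is a placeholder):
#   accelerate test --config_file ./accelerate_config.yaml
# which launches test_utils/scripts/test_script.py via `accelerate-launch`.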
def get_highest_set_bit_position(number: int) -> int:
    """
    Returns the 1-indexed position of the highest set bit of `number`
    (0 when no bit is set).

    >>> get_highest_set_bit_position(25)
    5
    >>> get_highest_set_bit_position(1)
    1
    >>> get_highest_set_bit_position(0)
    0
    >>> get_highest_set_bit_position(0.8)
    Traceback (most recent call last):
        ...
    TypeError: Input value must be an 'int' type
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """
    A generator that yields the Fibonacci numbers 1, 2, 3, 5, 8, ...

    >>> fib = fibonacci_generator()
    >>> next(fib)
    1
    >>> next(fib)
    2
    >>> next(fib)
    3
    """
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    """
    Returns the index of the first term in the Fibonacci sequence to contain
    n digits.

    >>> solution(3)
    12
    >>> solution(1000)
    4782
    """
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens):
    """Compute the MinHash of a code snippet."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code):
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key, min_hash):
        """Add a key to the MinHashLSH index; the min_hash is used to query the closest matches, if any."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath):
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    """Find duplicate clusters in the dataset."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1, code2):
    """Compute the Jaccard similarity of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Find a reduced cluster such that each code in the original cluster is similar to at least one code in it."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Call _find_cluster_extremes_shared on each cluster in parallel, sharing the dataset via a global."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    """Deduplicate the dataset, keeping one "extreme" representative per duplicate cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
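A minimal end-to-end sketch of the entry point above; the dataset id is a placeholder, and any dataset with `content`, `repo_name` and `path` columns fits:

from datasets import load_dataset

ds = load_dataset("lvwerra/codeparrot-clean", split="train")  # placeholder dataset id
ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
print(f"kept {len(ds_dedup)} of {len(ds)} files")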
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Calculates the speed of sound in a fluid from its density and bulk
    modulus, via the Newton-Laplace formula: v = sqrt(K / rho).

    >>> round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9), 2)
    1467.76
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
def match_pattern(input_string: str, pattern: str) -> bool:
    """
    Bottom-up dynamic programming solution for matching input_string against a
    regex-like pattern, where '.' matches any single character and '*' matches
    zero or more of the preceding character.

    >>> match_pattern("aab", "c*a*b")
    True
    >>> match_pattern("aaa", "aa")
    False
    >>> match_pattern("aaa", "a.a")
    True
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"{input_string} matches the given pattern {pattern}")
else:
print(F"{input_string} does not match with the given pattern {pattern}")
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCAmelCase__ = (
{'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = TFCvtModelTester(self )
UpperCAmelCase__ : Tuple = TFCvtConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def lowercase_ ( self : Any ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def lowercase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def lowercase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = tf.keras.mixed_precision.Policy('''mixed_float16''' )
tf.keras.mixed_precision.set_global_policy(_A )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : str = model_class(_A )
UpperCAmelCase__ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
def check_hidden_states_output(_A : Dict , _A : Optional[Any] , _A : Dict ):
UpperCAmelCase__ : str = model_class(_A )
UpperCAmelCase__ : List[str] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Tuple = outputs.hidden_states
UpperCAmelCase__ : int = len(self.model_tester.depth )
self.assertEqual(len(_A ) , _A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : List[str] = True
check_hidden_states_output(_A , _A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[int] = TFCvtModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> Any:
UpperCAmelCase__ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase__ : Union[str, Any] = self.default_image_processor
UpperCAmelCase__ : Optional[Any] = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=_A , return_tensors='''tf''' )
# forward pass
UpperCAmelCase__ : Optional[Any] = model(**_A )
# verify the logits
UpperCAmelCase__ : Union[str, Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase__ : Union[str, Any] = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _A , atol=1e-4 ) )
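The shape assertions in the model tester above rely on the standard convolution output-size formula, floor((size + 2*padding - kernel) / stride) + 1, applied once per stage. A minimal sketch assuming the tester defaults shown above (image_size=64, patch_sizes=[7, 3, 3], strides=[4, 2, 2], paddings=[2, 1, 1]):

from math import floor

size = 64  # tester default image_size
for kernel, stride, padding in zip([7, 3, 3], [4, 2, 2], [2, 1, 1]):
    size = floor((size + 2 * padding - kernel) / stride) + 1
    print(size)  # 16, 8, 4 -- stage 0 gives 64 // 4, matching the hidden-state check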
| 181
| 0
|
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A__ ( __snake_case , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
_UpperCAmelCase :int = 'ssube/stable-diffusion-x4-upscaler-onnx'
def __UpperCamelCase( self , A_=0 ):
'''simple docstring'''
UpperCamelCase : Tuple = floats_tensor((1, 3, 128, 128) , rng=random.Random(A_ ) )
UpperCamelCase : int = torch.manual_seed(A_ )
UpperCamelCase : Any = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase : int = self.get_dummy_inputs()
UpperCamelCase : Dict = pipe(**A_ ).images
UpperCamelCase : List[Any] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
UpperCamelCase : int = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCamelCase : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase : Union[str, Any] = self.get_dummy_inputs()
UpperCamelCase : List[str] = pipe(**A_ ).images
UpperCamelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase : Union[str, Any] = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCamelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase : Optional[Any] = self.get_dummy_inputs()
UpperCamelCase : Optional[Any] = pipe(**A_ ).images
UpperCamelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase : Dict = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCamelCase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase : Tuple = self.get_dummy_inputs()
UpperCamelCase : List[str] = pipe(**A_ ).images
UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase : List[str] = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCamelCase : Dict = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase : int = self.get_dummy_inputs()
UpperCamelCase : int = pipe(**A_ ).images
UpperCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase : Dict = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = ort.SessionOptions()
UpperCamelCase : List[str] = False
return options
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCamelCase : Tuple = init_image.resize((128, 128) )
# using the PNDM scheduler by default
UpperCamelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase : List[str] = "A fantasy landscape, trending on artstation"
UpperCamelCase : int = torch.manual_seed(0 )
UpperCamelCase : Optional[Any] = pipe(
prompt=A_ , image=A_ , guidance_scale=7.5 , num_inference_steps=10 , generator=A_ , output_type="np" , )
UpperCamelCase : List[str] = output.images
UpperCamelCase : Any = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase : Dict = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCamelCase : Union[str, Any] = init_image.resize((128, 128) )
UpperCamelCase : Union[str, Any] = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , subfolder="scheduler" )
UpperCamelCase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" , scheduler=A_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase : List[str] = "A fantasy landscape, trending on artstation"
UpperCamelCase : Tuple = torch.manual_seed(0 )
UpperCamelCase : Union[str, Any] = pipe(
prompt=A_ , image=A_ , guidance_scale=7.5 , num_inference_steps=20 , generator=A_ , output_type="np" , )
UpperCamelCase : str = output.images
UpperCamelCase : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
UpperCamelCase : Any = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
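All of the scheduler tests above share one idiom: run the pipeline, slice a small corner of the output image, and bound the maximum absolute deviation from a reference. A minimal sketch of that idiom with placeholder values (the expected slice below is hypothetical, not the output of any real pipeline):

import numpy as np

image = np.zeros((1, 512, 512, 3), dtype=np.float32)  # stand-in for pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()        # 3x3 corner of the last channel
expected_slice = np.zeros(9, dtype=np.float32)        # hypothetical reference values
assert np.abs(image_slice - expected_slice).max() < 1e-1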
| 140
|
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> list[float]:
rowsa , colsa = coefficient_matrix.shape
rowsb , colsb = constant_matrix.shape
if rowsa != colsa:
UpperCamelCase : List[Any] = F"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(_lowerCAmelCase )
if colsb != 1:
UpperCamelCase : Any = F"""Constant matrix must be nx1 but received {rowsb}x{colsb}"""
raise ValueError(_lowerCAmelCase )
if rowsa != rowsb:
UpperCamelCase : Tuple = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
F"""received {rowsa}x{colsa} and {rowsb}x{colsb}"""
)
raise ValueError(_lowerCAmelCase )
if len(_lowerCAmelCase ) != rowsa:
UpperCamelCase : Any = (
"Number of initial values must be equal to number of rows in coefficient "
F"""matrix but received {len(_lowerCAmelCase )} and {rowsa}"""
)
raise ValueError(_lowerCAmelCase )
if iterations <= 0:
raise ValueError("Iterations must be at least 1" )
UpperCamelCase : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
UpperCamelCase , UpperCamelCase : str = table.shape
strictly_diagonally_dominant(_lowerCAmelCase )
# Iterates the whole matrix for given number of times
for _ in range(_lowerCAmelCase ):
UpperCamelCase : Optional[Any] = []
for row in range(_lowerCAmelCase ):
UpperCamelCase : Optional[int] = 0
for col in range(_lowerCAmelCase ):
if col == row:
UpperCamelCase : Union[str, Any] = table[row][col]
elif col == cols - 1:
UpperCamelCase : List[Any] = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
UpperCamelCase : Dict = (temp + val) / denom
new_val.append(_lowerCAmelCase )
UpperCamelCase : List[str] = new_val
return [float(i ) for i in new_val]
def A_ ( _lowerCAmelCase ) -> bool:
UpperCamelCase , UpperCamelCase : Dict = table.shape
UpperCamelCase : List[Any] = True
for i in range(0 , _lowerCAmelCase ):
UpperCamelCase : Union[str, Any] = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
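For reference, a minimal self-contained Jacobi sweep, independent of the obfuscated names above: each update computes x_i = (b_i - sum over j != i of a_ij * x_j) / a_ii, and convergence is guaranteed when the coefficient matrix is strictly diagonally dominant.

import numpy as np

A = np.array([[4.0, 1.0], [2.0, 5.0]])  # strictly diagonally dominant
b = np.array([1.0, 2.0])
x = np.zeros(2)
for _ in range(25):
    # subtract the off-diagonal contributions, divide by the diagonal
    x = (b - (A @ x - np.diag(A) * x)) / np.diag(A)
print(x)  # approaches np.linalg.solve(A, b) == [1/6, 1/3]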
| 140
| 1
|
'''simple docstring'''
def a_ ( __snake_case : Any , __snake_case : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ =''''''
for i in table:
res += inp[i - 1]
return res
def a_ ( __snake_case : List[str] ) -> Optional[int]:
"""simple docstring"""
return data[1:] + data[0]
def a_ ( __snake_case : str , __snake_case : Tuple ) -> int:
"""simple docstring"""
lowerCamelCase_ =''''''
for i in range(len(__snake_case ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def a_ ( __snake_case : Optional[Any] , __snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ =int('''0b''' + data[0] + data[-1] , 2 )
lowerCamelCase_ =int('''0b''' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def a_ ( __snake_case : Optional[int] , __snake_case : List[str] , __snake_case : int , __snake_case : Tuple , __snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ =message[:4]
lowerCamelCase_ =message[4:]
lowerCamelCase_ =apply_table(__snake_case , __snake_case )
lowerCamelCase_ =xor(__snake_case , __snake_case )
lowerCamelCase_ =apply_sbox(__snake_case , temp[:4] ) # noqa: E741
lowerCamelCase_ =apply_sbox(__snake_case , temp[4:] )
lowerCamelCase_ ='''0''' * (2 - len(__snake_case )) + l # noqa: E741
lowerCamelCase_ ='''0''' * (2 - len(__snake_case )) + r
lowerCamelCase_ =apply_table(l + r , __snake_case )
lowerCamelCase_ =xor(__snake_case , __snake_case )
return temp + right
if __name__ == "__main__":
a_ : Any = input("""Enter 10 bit key: """)
a_ : Any = input("""Enter 8 bit message: """)
a_ : str = [6, 3, 7, 4, 8, 5, 10, 9]
a_ : str = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
a_ : str = [2, 4, 3, 1]
a_ : Optional[int] = [2, 6, 3, 1, 4, 8, 5, 7]
a_ : Optional[Any] = [4, 1, 3, 5, 7, 2, 8, 6]
a_ : Union[str, Any] = [4, 1, 2, 3, 2, 3, 4, 1]
a_ : int = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
a_ : Any = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
a_ : List[Any] = apply_table(key, paa_table)
a_ : str = temp[:5]
a_ : Optional[Any] = temp[5:]
a_ : Tuple = left_shift(left)
a_ : Optional[Any] = left_shift(right)
a_ : str = apply_table(left + right, pa_table)
a_ : Optional[Any] = left_shift(left)
a_ : Tuple = left_shift(right)
a_ : Union[str, Any] = left_shift(left)
a_ : List[str] = left_shift(right)
a_ : Optional[int] = apply_table(left + right, pa_table)
# encryption
a_ : Optional[int] = apply_table(message, IP)
a_ : List[Any] = function(expansion, sa, sa, keya, temp)
a_ : str = temp[4:] + temp[:4]
a_ : List[str] = function(expansion, sa, sa, keya, temp)
a_ : Union[str, Any] = apply_table(temp, IP_inv)
print("""Cipher text is:""", CT)
# decryption
a_ : Optional[int] = apply_table(CT, IP)
a_ : List[Any] = function(expansion, sa, sa, keya, temp)
a_ : int = temp[4:] + temp[:4]
a_ : int = function(expansion, sa, sa, keya, temp)
a_ : Optional[int] = apply_table(temp, IP_inv)
print("""Plain text after decypting is:""", PT)
| 75
|
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class __lowerCAmelCase :
pass
| 95
| 0
|
"""simple docstring"""
def a__ ( __SCREAMING_SNAKE_CASE = 1_0 , __SCREAMING_SNAKE_CASE = 1_0_0_0 , __SCREAMING_SNAKE_CASE = True ) -> int:
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
return int((number_a + number_b) / 2 )
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None:
assert (
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(__SCREAMING_SNAKE_CASE ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
__lowerCAmelCase: Optional[Any] = lower
__lowerCAmelCase: Optional[int] = higher
__lowerCAmelCase: Dict = []
while True:
__lowerCAmelCase: Dict = get_avg(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
last_numbers.append(__SCREAMING_SNAKE_CASE )
if answer(__SCREAMING_SNAKE_CASE ) == "low":
__lowerCAmelCase: Optional[Any] = number
elif answer(__SCREAMING_SNAKE_CASE ) == "high":
__lowerCAmelCase: int = number
else:
break
print(F"guess the number : {last_numbers[-1]}" )
print(F"details : {last_numbers!s}" )
def a__ ( ) -> None:
__lowerCAmelCase: Optional[int] = int(input("Enter lower value : " ).strip() )
__lowerCAmelCase: List[str] = int(input("Enter high value : " ).strip() )
__lowerCAmelCase: str = int(input("Enter value to guess : " ).strip() )
guess_the_number(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
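The interactive loop above is plain bisection. A minimal non-interactive sketch of the same logic, with the oracle replaced by a direct comparison against the target:

def guess(lower, higher, to_guess):
    # requires lower < to_guess < higher, as validated above
    while True:
        mid = int((lower + higher) / 2)
        if mid == to_guess:
            return mid
        if mid < to_guess:
            lower = mid   # answer was "low": discard the lower half
        else:
            higher = mid  # answer was "high": discard the upper half

print(guess(1, 100, 37))  # 37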
| 370
|
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class snake_case ( pl.LightningModule ):
def __init__( self : str , UpperCamelCase__ : List[str])-> str:
'''simple docstring'''
super().__init__()
__lowerCAmelCase: Optional[int] = model
__lowerCAmelCase: Tuple = 2
__lowerCAmelCase: List[Any] = nn.Linear(self.model.config.hidden_size , self.num_labels)
def lowercase_ ( self : Optional[int])-> str:
'''simple docstring'''
pass
def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
# load longformer model from model identifier
__lowerCAmelCase: List[str] = LongformerModel.from_pretrained(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = LightningModel(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Tuple = torch.load(__SCREAMING_SNAKE_CASE , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
__lowerCAmelCase: Optional[Any] = LongformerForQuestionAnswering.from_pretrained(__SCREAMING_SNAKE_CASE )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(__SCREAMING_SNAKE_CASE )
print(F"Conversion successful. Model saved under {pytorch_dump_folder_path}" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__A = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
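The conversion above boils down to torch's state-dict transfer: serialize the source module's parameters and load them into a freshly initialized module of identical architecture. A minimal sketch:

import torch
from torch import nn

src = nn.Linear(4, 2)
dst = nn.Linear(4, 2)  # same architecture, different random init
dst.load_state_dict(src.state_dict())
assert torch.equal(src.weight, dst.weight)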
| 108
| 0
|
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
SCREAMING_SNAKE_CASE :List[str] = 'pytorch_model.bin'
SCREAMING_SNAKE_CASE :str = 'pytorch_model.bin.index.json'
SCREAMING_SNAKE_CASE :Optional[int] = 'adapter_config.json'
SCREAMING_SNAKE_CASE :Dict = 'adapter_model.bin'
SCREAMING_SNAKE_CASE :Dict = 'adapter_model.safetensors'
SCREAMING_SNAKE_CASE :str = 'tf_model.h5'
SCREAMING_SNAKE_CASE :List[Any] = 'tf_model.h5.index.json'
SCREAMING_SNAKE_CASE :str = 'model.ckpt'
SCREAMING_SNAKE_CASE :List[Any] = 'flax_model.msgpack'
SCREAMING_SNAKE_CASE :Optional[int] = 'flax_model.msgpack.index.json'
SCREAMING_SNAKE_CASE :Tuple = 'model.safetensors'
SCREAMING_SNAKE_CASE :List[Any] = 'model.safetensors.index.json'
SCREAMING_SNAKE_CASE :str = 'config.json'
SCREAMING_SNAKE_CASE :int = 'preprocessor_config.json'
SCREAMING_SNAKE_CASE :Optional[Any] = FEATURE_EXTRACTOR_NAME
SCREAMING_SNAKE_CASE :Optional[int] = 'generation_config.json'
SCREAMING_SNAKE_CASE :List[str] = 'modelcard.json'
SCREAMING_SNAKE_CASE :Optional[int] = '▁'
SCREAMING_SNAKE_CASE :Optional[Any] = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
SCREAMING_SNAKE_CASE :str = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
SCREAMING_SNAKE_CASE :Optional[Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
SCREAMING_SNAKE_CASE :List[Any] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def UpperCAmelCase ( a_ ) -> Dict:
"""simple docstring"""
if version.parse(a_ ) < version.parse(a_ ):
if "dev" in min_version:
__A = (
"This example requires a source install from HuggingFace Transformers (see "
"`https://huggingface.co/docs/transformers/installation#install-from-source`),"
)
else:
__A = F'''This example requires a minimum version of {min_version},'''
error_message += F''' but the version found is {__version__}.\n'''
raise ImportError(
error_message
+ "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
"versions of HuggingFace Transformers." )
| 15
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def UpperCAmelCase ( a_ ) -> List[str]:
"""simple docstring"""
__A = args.pruning_method
__A = args.threshold
__A = args.model_name_or_path.rstrip("/" )
__A = args.target_model_path
print(F'''Load fine-pruned model from {model_name_or_path}''' )
__A = torch.load(os.path.join(a_ , "pytorch_model.bin" ) )
__A = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
__A = tensor
print(F'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
__A = tensor
print(F'''Copied layer {name}''' )
elif "bias" in name:
__A = tensor
print(F'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
__A = MagnitudeBinarizer.apply(inputs=a_ , threshold=a_ )
__A = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
__A = name[:-6]
__A = model[F'''{prefix_}mask_scores''']
__A = TopKBinarizer.apply(a_ , a_ )
__A = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
__A = name[:-6]
__A = model[F'''{prefix_}mask_scores''']
__A = ThresholdBinarizer.apply(a_ , a_ , a_ )
__A = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
__A = name[:-6]
__A = model[F'''{prefix_}mask_scores''']
__A , __A = -0.1, 1.1
__A = torch.sigmoid(a_ )
__A = s * (r - l) + l
__A = s_bar.clamp(min=0.0 , max=1.0 )
__A = tensor * mask
print(F'''Pruned layer {name}''' )
else:
raise ValueError("Unknown pruning method" )
if target_model_path is None:
__A = os.path.join(
os.path.dirname(a_ ) , F'''bertarized_{os.path.basename(a_ )}''' )
if not os.path.isdir(a_ ):
shutil.copytree(a_ , a_ )
print(F'''\nCreated folder {target_model_path}''' )
torch.save(a_ , os.path.join(a_ , "pytorch_model.bin" ) )
print("\nPruned model saved! See you later!" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Tuple = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. '
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared. '
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder to save the pruned model to; defaults to a `bertarized_<input folder>` sibling of the input model folder',
)
SCREAMING_SNAKE_CASE :str = parser.parse_args()
main(args)
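The common shape of all branches above is mask-based pruning: derive a binary mask from the scores, then multiply it into the weight tensor. A minimal sketch of the magnitude variant (a stand-in for the emmental MagnitudeBinarizer, not its actual implementation):

import torch

tensor = torch.randn(4, 4)
threshold = 0.5
mask = (tensor.abs() > threshold).float()  # keep only large-magnitude weights
pruned = tensor * mask                     # everything below the threshold becomes 0
print(int(mask.sum()), "weights kept")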
| 15
| 1
|
def _A ( snake_case ) -> int:
_lowercase : Dict = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _A ( snake_case = 1_00 ) -> int:
_lowercase : Tuple = 1
_lowercase : str = 2
for i in range(2 , max_n + 1 ):
_lowercase : Dict = pre_numerator
_lowercase : Optional[int] = 2 * i // 3 if i % 3 == 0 else 1
_lowercase : int = cur_numerator
_lowercase : str = e_cont * pre_numerator + temp
return sum_digits(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F'''{solution() = }''')
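This is the numerator recurrence for the continued-fraction convergents of e (Project Euler 65): h_i = a_i * h_(i-1) + h_(i-2), where a_i = 2*i//3 when i is divisible by 3 and 1 otherwise. A de-obfuscated sketch:

def numerator(max_n):
    pre, cur = 1, 2  # h_0, h_1 for e = [2; 1, 2, 1, 1, 4, 1, ...]
    for i in range(2, max_n + 1):
        a = 2 * i // 3 if i % 3 == 0 else 1
        pre, cur = cur, a * cur + pre
    return cur

print(numerator(10))  # 1457, whose digit sum is 17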
| 351
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class a__ ( lowerCamelCase_ ):
_SCREAMING_SNAKE_CASE : List[Any] = ['image_processor', 'tokenizer']
_SCREAMING_SNAKE_CASE : str = 'OwlViTImageProcessor'
_SCREAMING_SNAKE_CASE : List[str] = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase ):
"""simple docstring"""
_lowercase : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCamelCase , )
_lowercase : Optional[int] = kwargs.pop("feature_extractor" )
_lowercase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCamelCase , _UpperCamelCase )
def __call__( self , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase="max_length" , _UpperCamelCase="np" , **_UpperCamelCase ):
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(_UpperCamelCase , _UpperCamelCase ) or (isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(text[0] , _UpperCamelCase )):
_lowercase : int = [self.tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(text[0] , _UpperCamelCase ):
_lowercase : str = []
# Maximum number of queries across batch
_lowercase : str = max([len(_UpperCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_UpperCamelCase ) != max_num_queries:
_lowercase : List[Any] = t + [" "] * (max_num_queries - len(_UpperCamelCase ))
_lowercase : Tuple = self.tokenizer(_UpperCamelCase , padding=_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
encodings.append(_UpperCamelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_lowercase : List[Any] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_lowercase : Optional[Any] = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_lowercase : Union[str, Any] = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_lowercase : int = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_lowercase : int = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
_lowercase : Dict = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_lowercase : Optional[int] = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
_lowercase : List[str] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_lowercase : Optional[int] = BatchEncoding()
_lowercase : List[Any] = input_ids
_lowercase : Dict = attention_mask
if query_images is not None:
_lowercase : int = BatchEncoding()
_lowercase : Any = self.image_processor(
_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase ).pixel_values
_lowercase : Any = query_pixel_values
if images is not None:
_lowercase : str = self.image_processor(_UpperCamelCase , return_tensors=_UpperCamelCase , **_UpperCamelCase )
if text is not None and images is not None:
_lowercase : List[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_lowercase : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCamelCase ) , tensor_type=_UpperCamelCase )
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.image_processor.post_process(*_UpperCamelCase , **_UpperCamelCase )
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_object_detection(*_UpperCamelCase , **_UpperCamelCase )
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*_UpperCamelCase , **_UpperCamelCase )
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_UpperCamelCase , **_UpperCamelCase )
def _lowerCamelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*_UpperCamelCase , **_UpperCamelCase )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCamelCase , )
return self.image_processor_class
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCamelCase , )
return self.image_processor
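The text branch above pads every sample's query list to the longest one in the batch before tokenizing, so the per-sample tensors stack cleanly. A minimal sketch of just that padding step:

text = [["cat"], ["dog", "bird", "car"]]
max_num_queries = max(len(t) for t in text)
padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
print(padded)  # [['cat', ' ', ' '], ['dog', 'bird', 'car']]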
| 199
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
lowercase__ : Optional[Any] = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
lowercase__ : Optional[int] = 0
while number > 0:
lowercase__ : List[Any] = number % 10
sum_of_digits += last_digit
lowercase__ : List[str] = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def __UpperCAmelCase ( __lowerCamelCase = 1_00 ) -> int:
lowercase__ : Any = factorial(__lowerCamelCase )
lowercase__ : Dict = split_and_add(__lowerCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
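A quick cross-check of the two helpers above with a smaller input: 10! = 3628800, whose digits sum to 27.

from math import factorial

print(sum(int(d) for d in str(factorial(10))))  # 27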
| 16
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__A = logging.get_logger(__name__)
class _snake_case ( a__ ):
snake_case__ = ["input_features", "attention_mask"]
def __init__( self : Union[str, Any] , UpperCAmelCase : Tuple=80 , UpperCAmelCase : Tuple=16000 , UpperCAmelCase : Any=80 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Tuple=True , **UpperCAmelCase : Optional[int] , ):
super().__init__(feature_size=UpperCAmelCase , sampling_rate=UpperCAmelCase , padding_value=UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : str = num_mel_bins
__lowerCamelCase : Tuple = do_ceptral_normalize
__lowerCamelCase : Dict = normalize_means
__lowerCamelCase : str = normalize_vars
__lowerCamelCase : Optional[int] = True
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : np.ndarray , ):
__lowerCamelCase : Any = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
__lowerCamelCase : Optional[int] = torch.from_numpy(UpperCAmelCase ).unsqueeze(0 )
__lowerCamelCase : str = ta_kaldi.fbank(UpperCAmelCase , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def lowerCamelCase__ ( UpperCAmelCase : np.ndarray , UpperCAmelCase : int , UpperCAmelCase : Optional[bool] = True , UpperCAmelCase : Optional[bool] = True , UpperCAmelCase : float = 0.0 , ):
# make sure we normalize float32 arrays
if normalize_means:
__lowerCamelCase : Any = x[:input_length].mean(axis=0 )
__lowerCamelCase : Optional[int] = np.subtract(UpperCAmelCase , UpperCAmelCase )
if normalize_vars:
__lowerCamelCase : int = x[:input_length].std(axis=0 )
__lowerCamelCase : Union[str, Any] = np.divide(UpperCAmelCase , UpperCAmelCase )
if input_length < x.shape[0]:
__lowerCamelCase : Any = padding_value
# make sure array is in float32
__lowerCamelCase : List[str] = x.astype(np.floataa )
return x
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : List[np.ndarray] , UpperCAmelCase : Optional[np.ndarray] = None ):
__lowerCamelCase : Any = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCAmelCase , UpperCAmelCase , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(UpperCAmelCase , UpperCAmelCase )
]
def __call__( self : Optional[Any] , UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , **UpperCAmelCase : Dict , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
F""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
__lowerCamelCase : Optional[int] = isinstance(UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
__lowerCamelCase : Tuple = is_batched_numpy or (
isinstance(UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowerCamelCase : Dict = [np.asarray(UpperCAmelCase , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase , np.ndarray ):
__lowerCamelCase : Optional[int] = np.asarray(UpperCAmelCase , dtype=np.floataa )
elif isinstance(UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowerCamelCase : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowerCamelCase : Optional[int] = [raw_speech]
# extract fbank features
__lowerCamelCase : Optional[Any] = [self._extract_fbank_features(UpperCAmelCase ) for waveform in raw_speech]
# convert into correct format for padding
__lowerCamelCase : Dict = BatchFeature({"input_features": features} )
__lowerCamelCase : Optional[Any] = self.pad(
UpperCAmelCase , padding=UpperCAmelCase , max_length=UpperCAmelCase , truncation=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , **UpperCAmelCase , )
# make sure list is in array format
__lowerCamelCase : Tuple = padded_inputs.get("input_features" )
if isinstance(input_features[0] , UpperCAmelCase ):
__lowerCamelCase : List[str] = [np.asarray(UpperCAmelCase , dtype=np.floataa ) for feature in input_features]
__lowerCamelCase : Optional[int] = padded_inputs.get("attention_mask" )
if attention_mask is not None:
__lowerCamelCase : Union[str, Any] = [np.asarray(UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
__lowerCamelCase : Optional[int] = (
np.array(UpperCAmelCase , dtype=np.intaa )
if self._get_padding_strategies(UpperCAmelCase , max_length=UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__lowerCamelCase : Optional[int] = self.normalize(
padded_inputs["input_features"] , attention_mask=UpperCAmelCase )
if return_tensors is not None:
__lowerCamelCase : Optional[Any] = padded_inputs.convert_to_tensors(UpperCAmelCase )
return padded_inputs
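Utterance-level CMVN, as applied by the normalize step above, is per-feature standardization over the valid frames. A minimal sketch on random features:

import numpy as np

features = np.random.randn(100, 80).astype(np.float32)  # (frames, mel bins)
normalized = (features - features.mean(axis=0)) / features.std(axis=0)
print(normalized.mean(axis=0)[:3].round(5))  # ~0 per mel bin after normalization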
| 135
| 0
|
"""simple docstring"""
def lowercase (SCREAMING_SNAKE_CASE_ : str ) -> bool:
SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in ip_va_address.split('.' ) if i.isdigit()]
return len(SCREAMING_SNAKE_CASE_ ) == 4 and all(0 <= int(SCREAMING_SNAKE_CASE_ ) <= 2_55 for octet in octets )
if __name__ == "__main__":
__UpperCamelCase = input().strip()
__UpperCamelCase = '''valid''' if is_ip_va_address_valid(ip) else '''invalid'''
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
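For comparison, a compact version of the same check with the standard 0-255 octet range:

def is_ipv4(address):
    octets = address.split(".")
    return len(octets) == 4 and all(
        octet.isdigit() and 0 <= int(octet) <= 255 for octet in octets
    )

print(is_ipv4("192.168.0.1"))  # True
print(is_ipv4("300.1.2.3"))    # False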
| 356
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class lowerCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = XGLMConfig
SCREAMING_SNAKE_CASE_ : List[str] = {}
SCREAMING_SNAKE_CASE_ : Optional[Any] = """gelu"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=14 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=0.02 , ) -> str:
SCREAMING_SNAKE_CASE = parent
SCREAMING_SNAKE_CASE = batch_size
SCREAMING_SNAKE_CASE = seq_length
SCREAMING_SNAKE_CASE = is_training
SCREAMING_SNAKE_CASE = use_input_mask
SCREAMING_SNAKE_CASE = use_labels
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = d_model
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = ffn_dim
SCREAMING_SNAKE_CASE = activation_function
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 1
def __A ( self ) -> Optional[int]:
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE = self.get_config()
SCREAMING_SNAKE_CASE = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __A ( self ) -> int:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowerCAmelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowerCAmelCase__ , )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = config_and_inputs
SCREAMING_SNAKE_CASE = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ : int = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
SCREAMING_SNAKE_CASE_ : Optional[int] = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = False
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = TFXGLMModelTester(self )
SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowerCAmelCase__ , n_embd=37 )
def __A ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@slow
def __A ( self ) -> Tuple:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE = TFXGLMModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def __A ( self ) -> Tuple:
super().test_resize_token_embeddings()
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __A ( self , lowerCAmelCase__=True ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
SCREAMING_SNAKE_CASE = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
SCREAMING_SNAKE_CASE = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
SCREAMING_SNAKE_CASE = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowerCAmelCase__ )
@slow
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
SCREAMING_SNAKE_CASE = tokenizer('Today is a nice day and' , return_tensors='tf' )
SCREAMING_SNAKE_CASE = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and ensure the same output regardless of the available devices)
with tf.device(':/CPU:0' ):
SCREAMING_SNAKE_CASE = model.generate(lowerCAmelCase__ , do_sample=lowerCAmelCase__ , seed=[7, 0] )
SCREAMING_SNAKE_CASE = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
SCREAMING_SNAKE_CASE = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
SCREAMING_SNAKE_CASE = 'left'
# use different length sentences to test batching
SCREAMING_SNAKE_CASE = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_tensors='tf' , padding=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = inputs['input_ids']
SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase__ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
SCREAMING_SNAKE_CASE = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=12 )
SCREAMING_SNAKE_CASE = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase__ , max_new_tokens=12 )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , [non_padded_sentence, padded_sentence] )
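The batched-generation test above sets padding_side to left because decoder-only models append new tokens after the last position; left padding keeps each prompt adjacent to its continuation. A toy illustration of the layout:

prompts = [["a", "much", "longer", "prompt"], ["short"]]
width = max(len(p) for p in prompts)
padded = [["<pad>"] * (width - len(p)) + p for p in prompts]
for row in padded:
    print(row)  # pads sit on the left, so generation continues right after the prompt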
| 38
| 0
|
def UpperCamelCase ( __lowercase : List[Any] ,__lowercase : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = [0 for i in range(r + 1 )]
# nc0 = 1
A_ : Tuple = 1
for i in range(1 ,n + 1 ):
# to compute current row from previous row.
A_ : Any = min(__lowercase ,__lowercase )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
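A quick sanity check of the space-optimized Pascal-row computation above against the standard library:

from math import comb

print(comb(10, 5))  # 252, which binomial_coefficient(n=10, r=5) should also print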
| 140
|
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_UpperCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , *lowercase , **lowercase ):
"""simple docstring"""
warnings.warn(
'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use PoolFormerImageProcessor instead.' , lowercase , )
super().__init__(*lowercase , **lowercase )
| 140
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
A__ : int =StableDiffusionInpaintPipeline
A__ : Dict =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
A__ : Tuple =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A__ : Dict =frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A__ : Optional[int] =frozenset([] )
def A_ ( self : Any ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase_ , )
SCREAMING_SNAKE_CASE__ = PNDMScheduler(skip_prk_steps=UpperCAmelCase_ )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
SCREAMING_SNAKE_CASE__ = CLIPTextModel(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def A_ ( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase_ ) ).to(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(UpperCAmelCase_ ) ).convert('RGB' ).resize((64, 64) )
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(image + 4 ) ).convert('RGB' ).resize((64, 64) )
if str(UpperCAmelCase_ ).startswith('mps' ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': init_image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableDiffusionInpaintPipeline(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = sd_pipe(**UpperCAmelCase_ ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A_ ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
def A_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
SCREAMING_SNAKE_CASE__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
SCREAMING_SNAKE_CASE__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench.npy' )
SCREAMING_SNAKE_CASE__ = 'stabilityai/stable-diffusion-2-inpainting'
SCREAMING_SNAKE_CASE__ = StableDiffusionInpaintPipeline.from_pretrained(UpperCAmelCase_ , safety_checker=UpperCAmelCase_ )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = 'Face of a yellow cat, high resolution, sitting on a park bench'
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , generator=UpperCAmelCase_ , output_type='np' , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def A_ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
SCREAMING_SNAKE_CASE__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
SCREAMING_SNAKE_CASE__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'
'/yellow_cat_sitting_on_a_park_bench_fp16.npy' )
SCREAMING_SNAKE_CASE__ = 'stabilityai/stable-diffusion-2-inpainting'
SCREAMING_SNAKE_CASE__ = StableDiffusionInpaintPipeline.from_pretrained(
UpperCAmelCase_ , torch_dtype=torch.floataa , safety_checker=UpperCAmelCase_ , )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = 'Face of a yellow cat, high resolution, sitting on a park bench'
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , generator=UpperCAmelCase_ , output_type='np' , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def A_ ( self : Any ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-inpaint/init_image.png' )
SCREAMING_SNAKE_CASE__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png' )
SCREAMING_SNAKE_CASE__ = 'stabilityai/stable-diffusion-2-inpainting'
SCREAMING_SNAKE_CASE__ = PNDMScheduler.from_pretrained(UpperCAmelCase_ , subfolder='scheduler' )
SCREAMING_SNAKE_CASE__ = StableDiffusionInpaintPipeline.from_pretrained(
UpperCAmelCase_ , safety_checker=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , torch_dtype=torch.floataa , )
pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ = 'Face of a yellow cat, high resolution, sitting on a park bench'
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
prompt=UpperCAmelCase_ , image=UpperCAmelCase_ , mask_image=UpperCAmelCase_ , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='np' , )
SCREAMING_SNAKE_CASE__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
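# The end-to-end call pattern the slow tests above exercise, spelled out as a
# hedged usage sketch of the public `diffusers` API; it downloads real weights,
# so treat it as illustrative rather than a test.
import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

def run_inpainting_example():
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/sd2-inpaint/init_image.png"
    )
    mask_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
    )
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting",
        torch_dtype=torch.float16,
        safety_checker=None,
    )
    pipe.to("cuda")
    generator = torch.manual_seed(0)  # fixed seed for reproducible output
    image = pipe(
        prompt="Face of a yellow cat, high resolution, sitting on a park bench",
        image=init_image,
        mask_image=mask_image,
        generator=generator,
    ).images[0]
    image.save("inpainted.png")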
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__snake_case = logging.getLogger(__name__)
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.load_in_abit
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
' make sure you have the latest version of `bitsandbytes` installed.' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
SCREAMING_SNAKE_CASE__ = []
# custom device map
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(device_map.keys() ) > 1:
SCREAMING_SNAKE_CASE__ = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
SCREAMING_SNAKE_CASE__ = get_keys_to_not_convert(UpperCamelCase_ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(UpperCamelCase_ )
# compatibility with peft
SCREAMING_SNAKE_CASE__ = load_in_abit
SCREAMING_SNAKE_CASE__ = load_in_abit
SCREAMING_SNAKE_CASE__ = get_parameter_device(UpperCamelCase_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
SCREAMING_SNAKE_CASE__ = replace_with_bnb_layers(UpperCamelCase_ , UpperCamelCase_ , modules_to_not_convert=UpperCamelCase_ )
# convert param to the right dtype
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
SCREAMING_SNAKE_CASE__ = name.replace('.weight' , '' ).replace('.bias' , '' )
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(UpperCamelCase_ ):
param.to(UpperCamelCase_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
F'The model device type is {model_device.type}. However, cuda is needed for quantization.'
            ' We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' )
else:
with init_empty_weights():
SCREAMING_SNAKE_CASE__ = replace_with_bnb_layers(
UpperCamelCase_ , UpperCamelCase_ , modules_to_not_convert=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = get_quantized_model_device_map(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , max_memory=UpperCamelCase_ , no_split_module_classes=UpperCamelCase_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
load_checkpoint_in_model(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=UpperCamelCase_ , offload_state_dict=UpperCamelCase_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(UpperCamelCase_ , device_map=UpperCamelCase_ , offload_dir=UpperCamelCase_ )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None ) -> Optional[Any]:
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
SCREAMING_SNAKE_CASE__ = {'': torch.cuda.current_device()}
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized. ' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
SCREAMING_SNAKE_CASE__ = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = special_dtypes
SCREAMING_SNAKE_CASE__ = no_split_module_classes
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
SCREAMING_SNAKE_CASE__ = get_balanced_memory(
UpperCamelCase_ , low_zero=(device_map == 'balanced_low_0') , max_memory=UpperCamelCase_ , **UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ = max_memory
SCREAMING_SNAKE_CASE__ = infer_auto_device_map(UpperCamelCase_ , **UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
        # check that no quantized module gets dispatched to the cpu or the disk
SCREAMING_SNAKE_CASE__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
SCREAMING_SNAKE_CASE__ = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ) -> Optional[Any]:
'''simple docstring'''
if modules_to_not_convert is None:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = _replace_with_bnb_layers(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' This can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = False
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE__ = []
current_key_name.append(UpperCamelCase_ )
if isinstance(UpperCamelCase_ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
SCREAMING_SNAKE_CASE__ = '.'.join(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
SCREAMING_SNAKE_CASE__ = False
break
if proceed:
                # Load the bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=UpperCamelCase_ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
SCREAMING_SNAKE_CASE__ = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
SCREAMING_SNAKE_CASE__ = module.weight.data
if module.bias is not None:
SCREAMING_SNAKE_CASE__ = module.bias.data
bnb_module.requires_grad_(UpperCamelCase_ )
setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = True
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = _replace_with_bnb_layers(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def _lowercase ( UpperCamelCase_ ) -> Union[str, Any]:
'''simple docstring'''
with init_empty_weights():
        SCREAMING_SNAKE_CASE__ = deepcopy(UpperCamelCase_ )  # this has 0 cost since it is done inside the `init_empty_weights` context manager
SCREAMING_SNAKE_CASE__ = find_tied_parameters(UpperCamelCase_ )
# For compatibility with Accelerate < 0.18
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
SCREAMING_SNAKE_CASE__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE__ = sum(UpperCamelCase_ , [] )
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase_ ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE__ = False
if hasattr(UpperCamelCase_ , 'base_model_prefix' ):
SCREAMING_SNAKE_CASE__ = not hasattr(UpperCamelCase_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE__ = list(model.named_children() )
SCREAMING_SNAKE_CASE__ = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE__ = set(UpperCamelCase_ ) - set(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = list(set(UpperCamelCase_ ) ) + list(UpperCamelCase_ )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE__ = ['.weight', '.bias']
SCREAMING_SNAKE_CASE__ = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE__ = name.replace(UpperCamelCase_ , '' )
filtered_module_names.append(UpperCamelCase_ )
return filtered_module_names
def _lowercase ( UpperCamelCase_ ) -> str:
'''simple docstring'''
for m in model.modules():
if isinstance(UpperCamelCase_ , bnb.nn.Linearabit ):
return True
return False
def _lowercase ( UpperCamelCase_ ) -> str:
'''simple docstring'''
return next(parameter.parameters() ).device
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
if fpaa_statistics is None:
set_module_tensor_to_device(UpperCamelCase_ , UpperCamelCase_ , 0 , dtype=UpperCamelCase_ , value=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = param_name
SCREAMING_SNAKE_CASE__ = model
if "." in tensor_name:
SCREAMING_SNAKE_CASE__ = tensor_name.split('.' )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , UpperCamelCase_ )
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.' )
SCREAMING_SNAKE_CASE__ = new_module
SCREAMING_SNAKE_CASE__ = splits[-1]
# offload weights
SCREAMING_SNAKE_CASE__ = False
offload_weight(module._parameters[tensor_name] , UpperCamelCase_ , UpperCamelCase_ , index=UpperCamelCase_ )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , UpperCamelCase_ , index=UpperCamelCase_ , )
else:
offload_weight(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , index=UpperCamelCase_ )
offload_weight(UpperCamelCase_ , param_name.replace('weight' , 'SCB' ) , UpperCamelCase_ , index=UpperCamelCase_ )
set_module_tensor_to_device(UpperCamelCase_ , UpperCamelCase_ , 'meta' , dtype=UpperCamelCase_ , value=torch.empty(*param.size() ) )
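# The module-surgery pattern `_replace_with_bnb_layers` implements, reduced to
# plain PyTorch. `FakeQuantLinear` is a hypothetical stand-in for a
# bitsandbytes layer; the recursive walk and the skip list are the point.
import torch.nn as nn

class FakeQuantLinear(nn.Linear):
    """Placeholder for a quantized linear layer such as bnb.nn.Linear8bitLt."""

def replace_linear_layers(model, skip, prefix=""):
    for name, child in model.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and full_name not in skip:
            new_layer = FakeQuantLinear(
                child.in_features, child.out_features, bias=child.bias is not None
            )
            new_layer.weight.data = child.weight.data  # carry the weights over
            if child.bias is not None:
                new_layer.bias.data = child.bias.data
            setattr(model, name, new_layer)
        else:
            replace_linear_layers(child, skip, full_name)  # recurse into children
    return model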
class _A :
def __init__( self : List[Any] , _A : Optional[int] , _A : Tuple , _A : str ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = name
lowercase : str = value
lowercase : Any = weight
def __repr__( self : Optional[Any] ) -> Any:
"""simple docstring"""
return f"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def __a ( self : Dict ) -> List[str]:
"""simple docstring"""
return self.value
def __a ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return self.name
def __a ( self : str ) -> str:
"""simple docstring"""
return self.weight
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.value / self.weight
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : int = []
for i in range(len(__magic_name__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
'''simple docstring'''
lowercase : Tuple = sorted(__magic_name__ , key=__magic_name__ , reverse=__magic_name__ )
lowercase : Optional[Any] = []
    lowercase , lowercase = 0.0, 0.0
for i in range(len(__magic_name__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def snake_case( ) -> Optional[Any]:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
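# The greedy routine above with readable names. Items are sorted by the given
# key (typically value density) and taken while they fit; this is a heuristic
# for 0/1 knapsack and is only optimal for the fractional variant. It assumes
# the get_weight/get_value accessors defined on the item class above.
def greedy_knapsack(items, max_cost, key_func):
    items_sorted = sorted(items, key=key_func, reverse=True)
    chosen = []
    total_value = 0.0
    total_cost = 0.0
    for item in items_sorted:
        if total_cost + item.get_weight() <= max_cost:
            chosen.append(item)
            total_cost += item.get_weight()
            total_value += item.get_value()
    return chosen, total_value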
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , snake_case__=0.01 , snake_case__=1_000 ):
"""simple docstring"""
lowerCAmelCase : List[Any] = p_stop
lowerCAmelCase : Optional[Any] = max_length
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = 0
lowerCAmelCase : Tuple = False
while not stop and count < self.max_length:
yield count
count += 1
lowerCAmelCase : Dict = random.random() < self.p_stop
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=False , snake_case__=True ):
"""simple docstring"""
lowerCAmelCase : Dict = [
BatchSamplerShard(snake_case__ , 2 , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
for i in range(2 )
]
lowerCAmelCase : Any = [list(snake_case__ ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(snake_case__ ) for shard in batch_sampler_shards] , [len(snake_case__ ) for e in expected] )
self.assertListEqual(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase : Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
lowerCAmelCase : List[str] = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batches.
lowerCAmelCase : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
# Check the shards when the dataset is very small.
lowerCAmelCase : Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Any = [[], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
lowerCAmelCase : Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
lowerCAmelCase : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
lowerCAmelCase : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
# Check the shards when the dataset is very small.
lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Optional[int] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Optional[int] = [[], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Optional[int] = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=snake_case__ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowerCAmelCase : Dict = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batches.
lowerCAmelCase : str = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : List[str] = BatchSampler(range(22 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
        # Check the shards when the dataset is not a round multiple of batch size and does not have a multiple of
        # num_processes batches.
lowerCAmelCase : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is very small.
lowerCAmelCase : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [[[0, 1]], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=snake_case__ )
lowerCAmelCase : Optional[Any] = [[], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , even_batches=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=snake_case__ )
# Expected shouldn't change
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size.
lowerCAmelCase : Tuple = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowerCAmelCase : int = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
# Check the shards when the dataset is very small.
lowerCAmelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : int = [[[0, 1]], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
lowerCAmelCase : Dict = BatchSampler(range(2 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : List[str] = [[], []]
self.check_batch_sampler_shards(snake_case__ , snake_case__ , split_batches=snake_case__ , even_batches=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
lowerCAmelCase : Tuple = [BatchSamplerShard(snake_case__ , 2 , snake_case__ , even_batches=snake_case__ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__=False , snake_case__=2 , snake_case__=False ):
"""simple docstring"""
random.seed(snake_case__ )
lowerCAmelCase : List[str] = list(snake_case__ )
lowerCAmelCase : Optional[int] = [
IterableDatasetShard(
snake_case__ , batch_size=snake_case__ , drop_last=snake_case__ , num_processes=snake_case__ , process_index=snake_case__ , split_batches=snake_case__ , )
for i in range(snake_case__ )
]
lowerCAmelCase : str = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(snake_case__ )
iterable_dataset_lists.append(list(snake_case__ ) )
lowerCAmelCase : List[Any] = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
lowerCAmelCase : Tuple = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(snake_case__ ) , len(snake_case__ ) )
self.assertTrue(len(snake_case__ ) % shard_batch_size == 0 )
lowerCAmelCase : List[Any] = []
for idx in range(0 , len(snake_case__ ) , snake_case__ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(snake_case__ ) < len(snake_case__ ):
reference += reference
self.assertListEqual(snake_case__ , reference[: len(snake_case__ )] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = 42
lowerCAmelCase : Tuple = RandomIterableDataset()
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
# Edge case with a very small dataset
lowerCAmelCase : List[str] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
self.check_iterable_dataset_shards(snake_case__ , snake_case__ , batch_size=4 , drop_last=snake_case__ , split_batches=snake_case__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = BatchSampler(range(16 ) , batch_size=4 , drop_last=snake_case__ )
lowerCAmelCase : List[Any] = SkipBatchSampler(snake_case__ , 2 )
self.assertListEqual(list(snake_case__ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : int = DataLoader(list(range(16 ) ) , batch_size=4 )
lowerCAmelCase : Optional[int] = skip_first_batches(snake_case__ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(snake_case__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(snake_case__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def lowercase__ ( self ):
"""simple docstring"""
Accelerator()
lowerCAmelCase : Dict = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(snake_case__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(snake_case__ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
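# The core invariant `BatchSamplerShard` enforces, shown as a plain function:
# batches go to processes round-robin, and with `even_batches` the short shards
# wrap around so every process sees the same number of batches. A minimal
# sketch; the real class wraps at the sample level, not the batch level.
def shard_batches(batches, num_processes, process_index, even_batches=True):
    shard = batches[process_index::num_processes]
    if even_batches and batches:
        target = -(-len(batches) // num_processes)  # ceiling division
        i = 0
        while len(shard) < target:
            shard.append(batches[i % len(batches)])  # cycle from the start
            i += 1
    return shard

batches = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
print(shard_batches(batches, 2, 0))  # [[0, 1], [4, 5], [8, 9]]
print(shard_batches(batches, 2, 1))  # [[2, 3], [6, 7], [0, 1]]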
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return str(a_ ) == str(a_ )[::-1]
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return int(a_ ) + int(str(a_ )[::-1] )
def UpperCAmelCase ( a_ = 1_0000 ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = []
for num in range(1, a_ ):
lowerCamelCase : List[str] = 0
lowerCamelCase : Union[str, Any] = num
while iterations < 50:
lowerCamelCase : Optional[int] = sum_reverse(a_ )
iterations += 1
if is_palindrome(a_ ):
break
else:
lychrel_nums.append(a_ )
return len(a_ )
if __name__ == "__main__":
print(F"""{solution() = }""")
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
_A = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
_A = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
_A = [file for file in filepaths if ' ' in file]
if space_files:
print(F"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
_A = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
_A = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
_A = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
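# The same repo-hygiene checks packaged as a reusable function; the function
# name is illustrative and `paths` is assumed to be a list of relative file
# paths. A plain-Python sketch of the script above.
import os

def find_bad_paths(paths):
    problems = {
        "uppercase": [p for p in paths if p != p.lower()],
        "spaces": [p for p in paths if " " in p],
        "hyphens": [p for p in paths if "-" in p],
        "no_directory": [p for p in paths if os.sep not in p],
    }
    return {kind: files for kind, files in problems.items() if files}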
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@parameterized.expand([(None,), ("foo.json",)] )
def UpperCamelCase__ ( self : Tuple , __a : Optional[Any] ):
_a = GenerationConfig(
do_sample=__a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__a , config_name=__a )
_a = GenerationConfig.from_pretrained(__a , config_name=__a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , __a )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = AutoConfig.from_pretrained("gpt2" )
_a = GenerationConfig.from_model_config(__a )
_a = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__a , __a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCamelCase__ ( self : int ):
_a = GenerationConfig()
_a = {
"max_new_tokens": 10_24,
"foo": "bar",
}
_a = copy.deepcopy(__a )
_a = generation_config.update(**__a )
# update_kwargs was not modified (no side effects)
self.assertEqual(__a , __a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__a , {"foo": "bar"} )
def UpperCamelCase__ ( self : Dict ):
_a = GenerationConfig()
_a = "bar"
with tempfile.TemporaryDirectory("test-generation-config" ) as tmp_dir:
generation_config.save_pretrained(__a )
_a = GenerationConfig.from_pretrained(__a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , "bar" )
_a = GenerationConfig.from_model_config(__a )
assert not hasattr(__a , "foo" ) # no new kwargs should be initialized if from config
def UpperCamelCase__ ( self : Dict ):
_a = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , __a )
self.assertEqual(default_config.num_beams , 1 )
_a = GenerationConfig(
do_sample=__a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , __a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__a )
_a = GenerationConfig.from_pretrained(__a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , __a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls : Union[str, Any] ):
_a = TOKEN
HfFolder.save_token(__a )
@classmethod
def UpperCamelCase__ ( cls : Optional[Any] ):
try:
delete_repo(token=cls._token , repo_id="test-generation-config" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-generation-config-org" )
except HTTPError:
pass
def UpperCamelCase__ ( self : Optional[Any] ):
_a = GenerationConfig(
do_sample=__a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("test-generation-config" , use_auth_token=self._token )
_a = GenerationConfig.from_pretrained(f'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-generation-config" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__a , repo_id="test-generation-config" , push_to_hub=__a , use_auth_token=self._token )
_a = GenerationConfig.from_pretrained(f'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
def UpperCamelCase__ ( self : int ):
_a = GenerationConfig(
do_sample=__a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("valid_org/test-generation-config-org" , use_auth_token=self._token )
_a = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-generation-config-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__a , repo_id="valid_org/test-generation-config-org" , push_to_hub=__a , use_auth_token=self._token )
_a = GenerationConfig.from_pretrained("valid_org/test-generation-config-org" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__a , getattr(__a , __a ) )
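# The save/load round trip the tests above rely on, assuming the public
# `transformers.GenerationConfig` API.
import tempfile
from transformers import GenerationConfig

def roundtrip_generation_config():
    config = GenerationConfig(do_sample=True, temperature=0.7, length_penalty=1.0)
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)  # writes generation_config.json
        loaded = GenerationConfig.from_pretrained(tmp_dir)
    assert loaded.do_sample is True
    assert loaded.temperature == 0.7
    return loaded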
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class A ( nn.Module ):
UpperCamelCase__ : int
UpperCamelCase__ : int
UpperCamelCase__ : float =0.0
UpperCamelCase__ : int =1
UpperCamelCase__ : int =1
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =False
UpperCamelCase__ : bool =False
UpperCamelCase__ : bool =False
UpperCamelCase__ : jnp.dtype =jnp.floataa
def lowerCamelCase ( self : Any ) -> Any:
"""simple docstring"""
_lowerCamelCase : str =[]
_lowerCamelCase : Dict =[]
for i in range(self.num_layers ):
_lowerCamelCase : Union[str, Any] =self.in_channels if i == 0 else self.out_channels
_lowerCamelCase : Any =FlaxResnetBlockaD(
in_channels=lowercase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_ )
_lowerCamelCase : Dict =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowercase_ )
_lowerCamelCase : Optional[int] =resnets
_lowerCamelCase : Dict =attentions
if self.add_downsample:
_lowerCamelCase : Union[str, Any] =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Any , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=True ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Tuple =()
for resnet, attn in zip(self.resnets , self.attentions ):
_lowerCamelCase : Union[str, Any] =resnet(lowercase_ , lowercase_ , deterministic=lowercase_ )
_lowerCamelCase : Union[str, Any] =attn(lowercase_ , lowercase_ , deterministic=lowercase_ )
output_states += (hidden_states,)
if self.add_downsample:
_lowerCamelCase : Optional[int] =self.downsamplers_a(lowercase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class A ( nn.Module ):
UpperCamelCase__ : int
UpperCamelCase__ : int
UpperCamelCase__ : float =0.0
UpperCamelCase__ : int =1
UpperCamelCase__ : bool =True
UpperCamelCase__ : jnp.dtype =jnp.floataa
def lowerCamelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
_lowerCamelCase : str =[]
for i in range(self.num_layers ):
_lowerCamelCase : Tuple =self.in_channels if i == 0 else self.out_channels
_lowerCamelCase : Any =FlaxResnetBlockaD(
in_channels=lowercase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_ )
_lowerCamelCase : Union[str, Any] =resnets
if self.add_downsample:
_lowerCamelCase : List[str] =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Any , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]=True ) -> Dict:
"""simple docstring"""
_lowerCamelCase : Optional[int] =()
for resnet in self.resnets:
_lowerCamelCase : Tuple =resnet(lowercase_ , lowercase_ , deterministic=lowercase_ )
output_states += (hidden_states,)
if self.add_downsample:
_lowerCamelCase : Tuple =self.downsamplers_a(lowercase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class A ( nn.Module ):
UpperCamelCase__ : int
UpperCamelCase__ : int
UpperCamelCase__ : int
UpperCamelCase__ : float =0.0
UpperCamelCase__ : int =1
UpperCamelCase__ : int =1
UpperCamelCase__ : bool =True
UpperCamelCase__ : bool =False
UpperCamelCase__ : bool =False
UpperCamelCase__ : bool =False
UpperCamelCase__ : jnp.dtype =jnp.floataa
def lowerCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
_lowerCamelCase : str =[]
_lowerCamelCase : List[str] =[]
for i in range(self.num_layers ):
_lowerCamelCase : List[str] =self.in_channels if (i == self.num_layers - 1) else self.out_channels
_lowerCamelCase : Tuple =self.prev_output_channel if i == 0 else self.out_channels
_lowerCamelCase : List[str] =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_ )
_lowerCamelCase : Tuple =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowercase_ )
_lowerCamelCase : int =resnets
_lowerCamelCase : Dict =attentions
if self.add_upsample:
_lowerCamelCase : str =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[Any] , lowercase_ : Any , lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Union[str, Any]=True ) -> Optional[int]:
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_lowerCamelCase : Optional[int] =res_hidden_states_tuple[-1]
_lowerCamelCase : Union[str, Any] =res_hidden_states_tuple[:-1]
_lowerCamelCase : Any =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_lowerCamelCase : Optional[Any] =resnet(lowercase_ , lowercase_ , deterministic=lowercase_ )
_lowerCamelCase : List[Any] =attn(lowercase_ , lowercase_ , deterministic=lowercase_ )
if self.add_upsample:
_lowerCamelCase : Optional[Any] =self.upsamplers_a(lowercase_ )
return hidden_states
class A ( nn.Module ):
UpperCamelCase__ : int
UpperCamelCase__ : int
UpperCamelCase__ : int
UpperCamelCase__ : float =0.0
UpperCamelCase__ : int =1
UpperCamelCase__ : bool =True
UpperCamelCase__ : jnp.dtype =jnp.floataa
def lowerCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
_lowerCamelCase : List[str] =[]
for i in range(self.num_layers ):
_lowerCamelCase : Tuple =self.in_channels if (i == self.num_layers - 1) else self.out_channels
_lowerCamelCase : int =self.prev_output_channel if i == 0 else self.out_channels
_lowerCamelCase : str =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_ )
_lowerCamelCase : str =resnets
if self.add_upsample:
_lowerCamelCase : List[str] =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[str] , lowercase_ : str , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : Any=True ) -> int:
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
_lowerCamelCase : List[str] =res_hidden_states_tuple[-1]
_lowerCamelCase : str =res_hidden_states_tuple[:-1]
_lowerCamelCase : List[str] =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_lowerCamelCase : Optional[Any] =resnet(lowercase_ , lowercase_ , deterministic=lowercase_ )
if self.add_upsample:
_lowerCamelCase : Union[str, Any] =self.upsamplers_a(lowercase_ )
return hidden_states
class FlaxUNetMidBlockaDCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
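
# ---------------------------------------------------------------------------
# Illustration (not part of the module above): how an up block sizes its
# resnets. Each layer concatenates the running hidden states with one popped
# skip connection, so every resnet sees resnet_in_channels + res_skip_channels
# input channels. The default arguments are made-up but SD-typical numbers,
# chosen only to make the arithmetic concrete.
def _up_block_channel_plan(in_channels=320, prev_output_channel=640, out_channels=640, num_layers=3):
    plan = []
    for i in range(num_layers):
        res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
        resnet_in_channels = prev_output_channel if i == 0 else out_channels
        plan.append((resnet_in_channels + res_skip_channels, out_channels))
    return plan


# _up_block_channel_plan() == [(1280, 640), (1280, 640), (960, 640)]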
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def UpperCAmelCase ( UpperCAmelCase = True , *UpperCAmelCase , **UpperCAmelCase ) -> Dict:
if not is_tqdm_available():
raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
snake_case_ = False
if main_process_only:
snake_case_ = PartialState().local_process_index == 0
return _tqdm(*UpperCAmelCase , **UpperCAmelCase , disable=UpperCAmelCase )
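
# Usage sketch (hypothetical training loop; `dataloader` is a placeholder
# iterable, not part of this module): in a multi-process run only local
# process 0 renders the bar, every other rank gets a disabled tqdm instance.
#
#     for batch in tqdm(True, dataloader, desc="training"):
#         ...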
"""simple docstring"""
import os
import numpy
import onnx
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
snake_case_ = a.name
snake_case_ = b.name
snake_case_ = ''
snake_case_ = ''
snake_case_ = a == b
snake_case_ = name_a
snake_case_ = name_b
return res
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCAmelCase , UpperCAmelCase )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
for n in graph_proto.node:
_node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any:
snake_case_ = list(model.graph.initializer )
snake_case_ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
snake_case_ = inits[i].name
snake_case_ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]:
snake_case_ = os.path.dirname(UpperCAmelCase )
snake_case_ = os.path.basename(UpperCAmelCase )
snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) )
snake_case_ = list(model.graph.initializer )
snake_case_ = set()
snake_case_ = {}
snake_case_ = []
snake_case_ = 0
for i in range(len(UpperCAmelCase ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCAmelCase ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCAmelCase )
dup_set.add(UpperCAmelCase )
snake_case_ = inits[j].data_type
snake_case_ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , UpperCAmelCase )
total_reduced_size += mem_size
snake_case_ = inits[i].name
snake_case_ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCAmelCase )
else:
snake_case_ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
snake_case_ = sorted(UpperCAmelCase )
_remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
snake_case_ = 'optimized_' + model_file_name
snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase )
onnx.save(UpperCAmelCase , UpperCAmelCase )
return new_model
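
# Usage sketch: "model/encoder.onnx" below is a placeholder path. The function
# writes "optimized_encoder.onnx" next to the input file and returns its path.
#
#     optimized_path = remove_dup_initializers("model/encoder.onnx")
#     print("deduplicated model written to", optimized_path)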
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def __UpperCAmelCase ( __UpperCamelCase = 3 ):
if isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError('''number of qubits must be a integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(__UpperCamelCase ) != number_of_qubits:
raise ValueError('''number of qubits must be exact integer.''' )
if number_of_qubits > 10:
raise ValueError('''number of qubits too large to simulate(>10).''' )
__lowercase : int = QuantumRegister(__UpperCamelCase , '''qr''' )
__lowercase : str = ClassicalRegister(__UpperCamelCase , '''cr''' )
__lowercase : str = QuantumCircuit(__UpperCamelCase , __UpperCamelCase )
__lowercase : List[Any] = number_of_qubits
for i in range(__UpperCamelCase ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(__UpperCamelCase ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , __UpperCamelCase , __UpperCamelCase )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(__UpperCamelCase , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(__UpperCamelCase , __UpperCamelCase )
# simulate with 10000 shots
__lowercase : str = Aer.get_backend('''qasm_simulator''' )
__lowercase : Dict = execute(__UpperCamelCase , __UpperCamelCase , shots=1_00_00 )
return job.result().get_counts(__UpperCamelCase )
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \\n {quantum_fourier_transform(3)}"
)
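
# Sanity check (sketch): the QFT of the all-zero input state is a uniform
# superposition, so the 10_000 shots should spread roughly evenly over the
# 2**3 = 8 basis states, i.e. about 1_250 counts per bitstring.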
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
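
# Usage sketch (assumes the transformers agents runtime is set up): calling the
# tool chains encode -> forward -> decode. `waveform` is a placeholder for any
# 16 kHz mono audio array accepted by WhisperProcessor.
#
#     tool = SpeechToTextTool()
#     tool.setup()
#     transcript = tool(waveform)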
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase__ = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__,  # the dict built in the fmt: off block above
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
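
# Note on the "▁" pieces asserted in test_full_tokenizer above: SentencePiece
# marks word-initial tokens with U+2581 ("▁"), so "this is a test" tokenizes to
# ["▁this", "▁is", "▁a", "▁test"] and detokenization maps "▁" back to a space.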
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
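
# Shape note (sketch): with block_out_channels=[32, 64] the encoder halves the
# spatial size once per block except the last, so the 32x32 dummy input yields
# 32 / 2**(len(block_out_channels) - 1) = 16x16 latents with latent_channels=4.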
def bead_sort(sequence: list) -> list:
    """Bead (gravity) sort; only valid for sequences of non-negative integers."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
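
# Behaviour note: bead_sort runs in O(n**2) and sorts in place (the returned
# list is the same object), e.g. bead_sort([2, 0, 2]) == [0, 2, 2]; call
# bead_sort(list(values)) when you need a non-mutating sort.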
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
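
# Usage sketch: building a config that exposes intermediate feature maps for a
# backbone; the names follow the stage_names list built above
# ("stem", "stage1", ..., "stage4" for the default depths).
#
#     config = BitConfig(global_padding="SAME", out_features=["stage2", "stage4"])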
import os


def solution(filename: str = "input.txt") -> int:
    """
    Returns the minimal path sum in the matrix read from the given file, moving
    only right, up and down from any cell in the leftmost column to any cell in
    the rightmost column.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
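
# Shard-size arithmetic used above (sketch): each int64 row is 8 bytes, so
# max_shard_size=16 packs 16 // 8 = 2 rows per partition and a 100-row
# dataframe repartitions into 100 / 2 = 50 shards; max_shard_size=1 cannot go
# below one row per partition, hence the cap at 100 partitions.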
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}


class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between question answering / sequence
        # classification and the remaining tasks.
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrap `tqdm.auto.tqdm`, optionally showing the progress bar only on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Formats a user-agent string with basic info about the current environment."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[]
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    """Extract the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
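# Example (hypothetical path; the snapshot folder name must be a full 40-character
# hex string to pass the `REGEX_COMMIT_HASH` check, otherwise None is returned):
#     extract_commit_hash(".../models--foo--bar/snapshots/<40-hex-hash>/config.json")
#     -> "<40-hex-hash>"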
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None):
    """Move blob files from the old cache layout to the new one, leaving symlinks behind."""
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache
    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.")
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
logger.error(
F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    """Splice `variant` into `weights_name` just before the file extension."""
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name
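# Example:
#     _add_variant("diffusion_pytorch_model.bin", "fp16") -> "diffusion_pytorch_model.fp16.bin"
#     _add_variant("diffusion_pytorch_model.bin")         -> "diffusion_pytorch_model.bin"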
def _get_model_file(
    pretrained_model_name_or_path, *,
    weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None, ):
    """Resolve a weights file locally, or download it from the Hub."""
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
else:
raise EnvironmentError(
f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning, )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning, )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash, )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
"this model name. Check the model page at "
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
f'''containing a file named {weights_name}''' )
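# A minimal usage sketch (hypothetical repo id; all optional knobs left at
# conservative defaults rather than values taken from a real call site):
#     model_file = _get_model_file(
#         "some-org/some-model", weights_name=WEIGHTS_NAME, subfolder="unet",
#         cache_dir=None, force_download=False, proxies=None, resume_download=False,
#         local_files_only=False, use_auth_token=None, user_agent=http_user_agent(),
#         revision=None, commit_hash=None)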
| 312
| 1
|
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
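# Illustrative: for an example script that wrote {"eval_accuracy": 0.8} to
# <output_dir>/all_results.json, `get_results(output_dir)` returns that dict.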
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write an Accelerate config file that every test below launches with.
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]
    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n            --model_name_or_path distilbert-base-uncased\n            --output_dir {tmp_dir}\n            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --learning_rate=1e-4\n            --seed=42\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n            --model_name_or_path distilgpt2\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --block_size 128\n            --per_device_train_batch_size 5\n            --per_device_eval_batch_size 5\n            --num_train_epochs 2\n            --output_dir {tmp_dir}\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n            --model_name_or_path distilroberta-base\n            --train_file ./tests/fixtures/sample_text.txt\n            --validation_file ./tests/fixtures/sample_text.txt\n            --output_dir {tmp_dir}\n            --num_train_epochs=1\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --train_file tests/fixtures/tests_samples/conll/sample.json\n            --validation_file tests/fixtures/tests_samples/conll/sample.json\n            --output_dir {tmp_dir}\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=2\n            --num_train_epochs={epochs}\n            --seed 7\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))
    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --version_2_with_negative\n            --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n            --output_dir {tmp_dir}\n            --seed=42\n            --max_train_steps=10\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n            --model_name_or_path bert-base-uncased\n            --train_file tests/fixtures/tests_samples/swag/sample.json\n            --validation_file tests/fixtures/tests_samples/swag/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=20\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --with_tracking\n        ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n            --model_name_or_path t5-small\n            --train_file tests/fixtures/tests_samples/xsum/sample.json\n            --validation_file tests/fixtures/tests_samples/xsum/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=50\n            --num_warmup_steps=8\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))
    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n            --model_name_or_path sshleifer/student_marian_en_ro_6_1\n            --source_lang en\n            --target_lang ro\n            --train_file tests/fixtures/tests_samples/wmt16/sample.json\n            --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n            --output_dir {tmp_dir}\n            --max_train_steps=50\n            --num_warmup_steps=8\n            --num_beams=6\n            --learning_rate=3e-3\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --source_lang en_XX\n            --target_lang ro_RO\n            --checkpointing_steps epoch\n            --with_tracking\n        ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))
    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n            --dataset_name huggingface/semantic-segmentation-test-sample\n            --output_dir {tmp_dir}\n            --max_train_steps=10\n            --num_warmup_steps=2\n            --learning_rate=2e-4\n            --per_device_train_batch_size=2\n            --per_device_eval_batch_size=1\n            --checkpointing_steps epoch\n        ".split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"\n            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n            --model_name_or_path google/vit-base-patch16-224-in21k\n            --dataset_name hf-internal-testing/cats_vs_dogs_sample\n            --learning_rate 1e-4\n            --per_device_train_batch_size 2\n            --per_device_eval_batch_size 1\n            --max_train_steps 2\n            --train_val_split 0.1\n            --seed 42\n            --output_dir {tmp_dir}\n            --with_tracking\n            --checkpointing_steps 1\n        ".split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 159
|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
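# A usage sketch (hypothetical run id; a token is only needed for private repos
# or to raise the API rate limit):
#     job_links = get_job_links("1234567890", token=os.environ.get("GITHUB_TOKEN"))
#     # -> {"Model tests (models/albert, single-gpu)": "https://github.com/...", ...}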
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the redirect target is the real download URL."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors and failed tests from a single downloaded artifact zip."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem.")
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
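# Each element of the returned list has the form
#     [error_line, error, failed_test, job_link]
# e.g. (illustrative values): ["tests/test_foo.py:42", "AssertionError: ...",
# "tests/test_foo.py::FooTest::test_bar", "https://github.com/..."].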
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact zip files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error and collect the tests it made fail."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path, e.g. `tests/models/albert/...`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Count errors per model, dropping log entries that don't map to a model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the per-error counts as a GitHub-flavored Markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
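# Illustrative output (values are made up):
#     | no. | error | status |
#     |-:|:-|:-|
#     | 3 | OSError: Can't load weights for ... | |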
def make_github_table_per_model(reduced_by_model):
    """Render the per-model counts (with each model's most common error) as a Markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)
    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)
    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
| 159
| 1
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, sentence_order_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device)
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 318
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 318
| 1
|
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
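# Examples: 28 is perfect (1 + 2 + 4 + 7 + 14 == 28), 27 is not
# (its proper divisors 1, 3 and 9 only sum to 13):
#     perfect(28) -> True
#     perfect(27) -> False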
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
__lowerCamelCase : str = int(input("""Enter number: """).strip())
print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 353
|
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
if days_between_payments <= 0:
raise ValueError("days_between_payments must be > 0" )
if daily_interest_rate < 0:
raise ValueError("daily_interest_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * daily_interest_rate * days_between_payments
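# Example: an 18,000 principal at a 6% rate per period, over 3 periods:
#     simple_interest(18000.0, 0.06, 3) -> 3240.0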
def compound_interest(principal: float, nominal_annual_interest_rate_percentage: float, number_of_compounding_periods: float) -> float:
if number_of_compounding_periods <= 0:
raise ValueError("number_of_compounding_periods must be > 0" )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
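# Example: 10,000 at 5% per compounding period, over 3 periods:
#     compound_interest(10000.0, 0.05, 3) -> 10000 * (1.05 ** 3 - 1) ≈ 1576.25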
def apr_interest(principal: float, nominal_annual_percentage_rate: float, number_of_years: float) -> float:
if number_of_years <= 0:
raise ValueError("number_of_years must be > 0" )
if nominal_annual_percentage_rate < 0:
raise ValueError("nominal_annual_percentage_rate must be >= 0" )
if principal <= 0:
raise ValueError("principal must be > 0" )
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365)
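# Sketch: a 6% APR on 100,000 over one year compounds daily, i.e. the call
#     apr_interest(100000.0, 0.06, 1)
# delegates to compound_interest(100000.0, 0.06 / 365, 365).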
if __name__ == "__main__":
import doctest
doctest.testmod()
| 286
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FunnelConfig(
            vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))
        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.num_labels
SCREAMING_SNAKE_CASE_: Any =TFFunnelForTokenClassification(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: int ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE_: str =model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =TFFunnelForQuestionAnswering(config=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: Dict ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE_: int =model(lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : str ) -> List[str]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class a ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
UpperCamelCase : Optional[Any] = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase : Optional[int] = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase : Optional[int] = False
UpperCamelCase : Union[str, Any] = False
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =TFFunnelModelTester(self )
SCREAMING_SNAKE_CASE_: str =ConfigTester(self , config_class=lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase )
def lowerCamelCase__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase )
def lowerCamelCase__ ( self : Tuple ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase )
@require_tf
class a ( lowerCAmelCase_ , unittest.TestCase ):
UpperCamelCase : str = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
UpperCamelCase : List[str] = False
UpperCamelCase : str = False
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: List[Any] =TFFunnelModelTester(self , base=lowerCAmelCase )
SCREAMING_SNAKE_CASE_: str =ConfigTester(self , config_class=lowerCAmelCase )
def lowerCamelCase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*lowerCAmelCase )
def lowerCamelCase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase )
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase )
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6_021E-19  # units = C
def snake_case__ ( conductivity : float , electron_conc : float , mobility : float , ):
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
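# Usage sketch (hedged: made-up numbers): pass exactly one of the three
# quantities as 0 and the helper above solves sigma = q * n * mu for it.
_quantity, _value = snake_case__(conductivity=25.0, electron_conc=6e19, mobility=0.0)
assert _quantity == "mobility"  # _value == 25.0 / (6e19 * ELECTRON_CHARGE)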
def counting_sort(collection):
    """Stable counting sort; runs in O(n + k) where k is the value range."""
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string):
    """Sort the characters of a string via counting sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
_snake_case = input("Enter numbers separated by a comma:\n").strip()
_snake_case = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
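# Stability/range demo (hedged: ad-hoc values): equal keys keep their relative
# order because the output pass walks the input right-to-left, and negative
# inputs work thanks to the coll_min offset.
assert counting_sort([4, 1, 3, 4, 2, 1]) == [1, 1, 2, 3, 4, 4]
assert counting_sort([-5, 0, -3, 2]) == [-5, -3, 0, 2]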
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["ConvNextFeatureExtractor"]
_snake_case = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure)
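# Minimal sketch of the deferred-import pattern used above (hedged: a toy
# re-implementation, not the real transformers._LazyModule): attribute access
# resolves to the owning submodule only on first use, so importing the package
# stays cheap even when the torch/tf/vision backends are heavy.
import importlib
import types
class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._name_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        return getattr(importlib.import_module(self._name_to_submodule[attr]), attr)
# e.g. _TinyLazyModule("demo", {"json": ["dumps"]}).dumps({"ok": True}) imports json on first access.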
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowerCamelCase__ ( _a , _a=10):
SCREAMING_SNAKE_CASE : int = []
for _ in range(_a):
lrs.append(scheduler.get_lr()[0])
scheduler.step()
return lrs
def lowerCamelCase__ ( _a , _a=10):
SCREAMING_SNAKE_CASE : Dict = []
for step in range(_a):
lrs.append(scheduler.get_lr()[0])
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(_a , "schedule.bin")
torch.save(scheduler.state_dict() , _a)
SCREAMING_SNAKE_CASE : int = torch.load(_a)
scheduler.load_state_dict(_a)
return lrs
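# Standalone sketch of the checkpoint round trip exercised above (hedged: toy
# optimizer and schedule, not part of the original test). Plain lambdas are
# skipped by LambdaLR.state_dict(), which is why the test below wraps
# lr_lambdas in a picklable class before saving and reloading.
def _demo_scheduler_roundtrip():
    opt = AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1.0)
    sched = get_constant_schedule_with_warmup(opt, num_warmup_steps=2)
    sched.step()
    with tempfile.TemporaryDirectory() as tmpdirname:
        ckpt = os.path.join(tmpdirname, "schedule.bin")
        torch.save(sched.state_dict(), ckpt)
        sched.load_state_dict(torch.load(ckpt))
    return sched.get_last_lr()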
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self : Any , a : Any , a : int , a : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(len(a ) , len(a ) )
for a, b in zip(a , a ):
self.assertAlmostEqual(a , a , delta=a )
def __UpperCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] )
SCREAMING_SNAKE_CASE : List[str] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
SCREAMING_SNAKE_CASE : Optional[int] = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
SCREAMING_SNAKE_CASE : int = criterion(a , a )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = torch.tensor([0.1, -0.2, -0.1] , requires_grad=a )
SCREAMING_SNAKE_CASE : Any = torch.tensor([0.4, 0.2, -0.5] )
SCREAMING_SNAKE_CASE : Any = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
SCREAMING_SNAKE_CASE : str = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=a , weight_decay=0.0 , relative_step=a , scale_parameter=a , warmup_init=a , )
for _ in range(1000 ):
SCREAMING_SNAKE_CASE : str = criterion(a , a )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =nn.Linear(50 , 50 ) if is_torch_available() else None
lowerCamelCase__ =AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
lowerCamelCase__ =10
def __UpperCamelCase ( self : str , a : int , a : Optional[Any] , a : Optional[Any] , a : Optional[Any]=None ) -> Dict:
"""simple docstring"""
self.assertEqual(len(a ) , len(a ) )
for a, b in zip(a , a ):
self.assertAlmostEqual(a , a , delta=a , msg=a )
def __UpperCamelCase ( self : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
SCREAMING_SNAKE_CASE : str = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[int] = data
SCREAMING_SNAKE_CASE : Dict = scheduler_func(self.optimizer , **a )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
SCREAMING_SNAKE_CASE : int = unwrap_schedule(a , self.num_steps )
self.assertListAlmostEqual(
a , a , tol=1e-2 , msg=F"failed for {scheduler_func} in normal scheduler" , )
SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler_func(self.optimizer , **a )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(a ) # wrap to test picklability of the schedule
SCREAMING_SNAKE_CASE : Union[str, Any] = unwrap_and_save_reload_schedule(a , self.num_steps )
self.assertListEqual(a , a , msg=F"failed for {scheduler_func} in save and reload" )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Any , a : Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = fn
def __call__( self : Any , *a : List[Any] , **a : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.fn(*a , **a )
@classmethod
def __UpperCamelCase ( self : int , a : Optional[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = list(map(self , scheduler.lr_lambdas ) )
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt'}
a_ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
a_ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
a_ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =ConvBertTokenizer
def __init__( self : List[str] , a : Union[str, Any]=None , a : Optional[int]=None , a : int=True , a : Tuple="[UNK]" , a : Dict="[SEP]" , a : Dict="[PAD]" , a : List[Any]="[CLS]" , a : Tuple="[MASK]" , a : Dict=True , a : Optional[Any]=None , **a : str , ) -> Dict:
"""simple docstring"""
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
SCREAMING_SNAKE_CASE : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , a ) != do_lower_case
or normalizer_state.get("strip_accents" , a ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , a ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE : List[str] = getattr(a , normalizer_state.pop("type" ) )
SCREAMING_SNAKE_CASE : Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE : Any = strip_accents
SCREAMING_SNAKE_CASE : Optional[int] = tokenize_chinese_chars
SCREAMING_SNAKE_CASE : List[str] = normalizer_class(**a )
SCREAMING_SNAKE_CASE : str = do_lower_case
def __UpperCamelCase ( self : Union[str, Any] , a : List[Any] , a : int=None ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __UpperCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : Tuple , a : str , a : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self._tokenizer.model.save(a , name=a )
return tuple(a )
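# Plain-Python sketch of the two helpers above (hedged: illustrative ids; the
# real [CLS]/[SEP] ids come from the vocab). A pair becomes
# [CLS] A [SEP] B [SEP], with token_type_ids 0 over the first segment and 1
# over the second.
def _demo_pair_layout(ids_a, ids_b, cls_id=101, sep_id=102):
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids
# _demo_pair_layout([7, 8], [9]) -> ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])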
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowercase__ : List[Any] = logging.get_logger(__name__)
class UpperCamelCase__ ( lowerCamelCase__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""pixel_values"""]
def __init__( self : int , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : bool = True , **SCREAMING_SNAKE_CASE_ : List[str] , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = size if size is not None else {"shortest_edge": 2_2_4}
lowerCAmelCase_ : int = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = crop_size if crop_size is not None else {"height": 2_5_6, "width": 2_5_6}
lowerCAmelCase_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='crop_size' )
lowerCAmelCase_ : Dict = do_resize
lowerCAmelCase_ : List[Any] = size
lowerCAmelCase_ : List[Any] = resample
lowerCAmelCase_ : List[str] = do_rescale
lowerCAmelCase_ : Union[str, Any] = rescale_factor
lowerCAmelCase_ : List[Any] = do_center_crop
lowerCAmelCase_ : Optional[int] = crop_size
lowerCAmelCase_ : Any = do_flip_channel_order
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : PILImageResampling = PIL.Image.BILINEAR , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[Any] , ):
lowerCAmelCase_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}" )
lowerCAmelCase_ : List[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Dict , ):
lowerCAmelCase_ : Tuple = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[int, float] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[str] , ):
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None ):
return flip_channel_order(SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : float = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : Optional[int] , ):
lowerCAmelCase_ : Any = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : List[Any] = resample if resample is not None else self.resample
lowerCAmelCase_ : List[str] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase_ : Any = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
lowerCAmelCase_ : Union[str, Any] = size if size is not None else self.size
lowerCAmelCase_ : List[str] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase_ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='crop_size' )
lowerCAmelCase_ : Optional[Any] = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase_ : int = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
lowerCAmelCase_ : List[str] = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
lowerCAmelCase_ : int = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
lowerCAmelCase_ : Tuple = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
lowerCAmelCase_ : int = [self.flip_channel_order(image=SCREAMING_SNAKE_CASE_ ) for image in images]
lowerCAmelCase_ : Dict = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
lowerCAmelCase_ : Optional[int] = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Tuple] = None ):
lowerCAmelCase_ : Tuple = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : List[Any] = target_sizes.numpy()
lowerCAmelCase_ : int = []
for idx in range(len(SCREAMING_SNAKE_CASE_ ) ):
lowerCAmelCase_ : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase_ : str = logits.argmax(dim=1 )
lowerCAmelCase_ : Union[str, Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
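# Torch sketch of the post-processing above (hedged: random logits stand in for
# real model output): bilinear-resize each image's logits to its target size,
# then argmax over the class axis to get an integer (H, W) label map.
def _demo_post_process(num_classes=3, target_size=(32, 48)):
    logits = torch.randn(1, num_classes, 8, 12)  # (batch, classes, h, w)
    resized = torch.nn.functional.interpolate(
        logits, size=target_size, mode='bilinear', align_corners=False
    )
    return resized[0].argmax(dim=0)  # (32, 48) segmentation map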
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ : Tuple = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""pixel_values"""]
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE_ : int , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowerCAmelCase_ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
lowerCAmelCase_ : List[str] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ , param_name='crop_size' )
lowerCAmelCase_ : List[Any] = do_resize
lowerCAmelCase_ : Any = do_rescale
lowerCAmelCase_ : int = do_normalize
lowerCAmelCase_ : List[str] = do_center_crop
lowerCAmelCase_ : Dict = crop_size
lowerCAmelCase_ : Optional[Any] = size
lowerCAmelCase_ : Tuple = resample
lowerCAmelCase_ : Optional[int] = rescale_factor
lowerCAmelCase_ : Optional[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCAmelCase_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : str , ):
lowerCAmelCase_ : str = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "shortest_edge" in size:
lowerCAmelCase_ : Any = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
lowerCAmelCase_ : Union[str, Any] = (size['height'], size['width'])
else:
raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : int , ):
lowerCAmelCase_ : Any = get_size_dict(SCREAMING_SNAKE_CASE_ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : str , ):
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : int = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[float] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : Optional[int] , ):
lowerCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase_ : Dict = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase_ : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='crop_size' , default_to_square=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = resample if resample is not None else self.resample
lowerCAmelCase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ : List[str] = image_std if image_std is not None else self.image_std
lowerCAmelCase_ : Tuple = size if size is not None else self.size
lowerCAmelCase_ : str = get_size_dict(SCREAMING_SNAKE_CASE_ )
if not is_batched(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : List[Any] = [images]
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
lowerCAmelCase_ : str = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
lowerCAmelCase_ : Any = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
lowerCAmelCase_ : Optional[int] = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
lowerCAmelCase_ : Tuple = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
lowerCAmelCase_ : str = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
lowerCAmelCase_ : Any = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
lowerCAmelCase_ : Optional[Any] = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
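# Plain-Python sketch of the "shortest_edge" resize convention handled above
# (hedged: mirrors get_resize_output_image_size only for the
# default_to_square=False case, and rounding may differ slightly from the
# library helper): scale both sides so the shorter one hits the requested
# size, preserving aspect ratio.
def _demo_shortest_edge(height, width, shortest_edge):
    scale = shortest_edge / min(height, width)
    return int(round(height * scale)), int(round(width * scale))
# _demo_shortest_edge(480, 640, 224) -> (224, 299)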
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f):  # count the digits of the new term, not of n
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
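# Sanity check (hedged: this is Project Euler problem 25): the first Fibonacci
# term with 3 digits is F(12) = 144, and with the default 1_000 digits the
# returned index is 4782.
assert solution(3) == 12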
import logging
from transformers.configuration_utils import PretrainedConfig
_snake_case : Optional[Any] = logging.getLogger(__name__)
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = "masked_bert"
def __init__( self : Optional[int] , lowerCamelCase : Any=30522 , lowerCamelCase : Tuple=768 , lowerCamelCase : str=12 , lowerCamelCase : Dict=12 , lowerCamelCase : List[str]=3072 , lowerCamelCase : List[str]="gelu" , lowerCamelCase : Any=0.1 , lowerCamelCase : Any=0.1 , lowerCamelCase : List[Any]=512 , lowerCamelCase : int=2 , lowerCamelCase : str=0.02 , lowerCamelCase : List[str]=1E-12 , lowerCamelCase : Any=0 , lowerCamelCase : Dict="topK" , lowerCamelCase : List[Any]="constant" , lowerCamelCase : Dict=0.0 , **lowerCamelCase : List[Any] , ) -> Dict:
super().__init__(pad_token_id=lowerCamelCase , **lowerCamelCase )
__snake_case : Optional[Any] = vocab_size
__snake_case : Optional[int] = hidden_size
__snake_case : Tuple = num_hidden_layers
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : Optional[Any] = hidden_act
__snake_case : str = intermediate_size
__snake_case : Tuple = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : List[str] = max_position_embeddings
__snake_case : Union[str, Any] = type_vocab_size
__snake_case : Any = initializer_range
__snake_case : str = layer_norm_eps
__snake_case : str = pruning_method
__snake_case : int = mask_init
__snake_case : Any = mask_scale
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_snake_case : Optional[Any] = "Create a default config file for Accelerate with only a few flags set."
def lowerCAmelCase_ ( __lowerCamelCase="no" , __lowerCamelCase = default_json_config_file , __lowerCamelCase = False ):
__snake_case : int = Path(__lowerCamelCase )
path.parent.mkdir(parents=__lowerCamelCase , exist_ok=__lowerCamelCase )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
__snake_case : Any = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
__snake_case : Optional[int] = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
__snake_case : Dict = torch.cuda.device_count()
__snake_case : Tuple = num_gpus
__snake_case : List[str] = False
if num_gpus > 1:
__snake_case : Optional[int] = "MULTI_GPU"
else:
__snake_case : Dict = "NO"
elif is_xpu_available() and use_xpu:
__snake_case : List[str] = torch.xpu.device_count()
__snake_case : str = num_xpus
__snake_case : int = False
if num_xpus > 1:
__snake_case : Optional[int] = "MULTI_XPU"
else:
__snake_case : str = "NO"
elif is_npu_available():
__snake_case : Any = torch.npu.device_count()
__snake_case : str = num_npus
__snake_case : str = False
if num_npus > 1:
__snake_case : Optional[int] = "MULTI_NPU"
else:
__snake_case : int = "NO"
else:
__snake_case : List[Any] = 0
__snake_case : Dict = True
__snake_case : Tuple = 1
__snake_case : Tuple = "NO"
__snake_case : str = ClusterConfig(**__lowerCamelCase )
config.to_json_file(__lowerCamelCase )
return path
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
__snake_case : Optional[Any] = parser.add_parser("default" , parents=__lowerCamelCase , help=__lowerCamelCase , formatter_class=__lowerCamelCase )
parser.add_argument(
"--config_file" , default=__lowerCamelCase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , dest="save_location" , )
parser.add_argument(
"--mixed_precision" , choices=["no", "fp16", "bf16"] , type=__lowerCamelCase , help="Whether or not to use mixed precision training. "
"Choose between FP16 and BF16 (bfloat16) training. "
"BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later." , default="no" , )
parser.set_defaults(func=__lowerCamelCase )
return parser
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : List[Any] = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'accelerate configuration saved at {config_file}' )
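# Usage sketch of the first helper above (hedged: in un-obfuscated Accelerate
# it is exposed as accelerate.utils.write_basic_config, with the same
# mixed_precision/save_location defaults shown in the signature):
def _demo_default_config():
    import tempfile
    from accelerate.utils import write_basic_config

    target = Path(tempfile.mkdtemp()) / "default_config.json"
    return write_basic_config(mixed_precision="bf16", save_location=target)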
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class _A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : List[Any]):
a : str = 0
a : Optional[int] = [0]
a : Union[str, Any] = [0]
a : Any = len(__UpperCAmelCase)
self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0)
a : List[str] = [60]
a : str = [10]
a : Optional[int] = len(__UpperCAmelCase)
self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0)
def __snake_case ( self : Optional[int]):
a : Any = 3
a : str = [1, 2, 3]
a : Tuple = [3, 2, 1]
a : Any = len(__UpperCAmelCase)
self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5)
def __snake_case ( self : Tuple):
a : int = 50
a : List[Any] = [60, 100, 120]
a : Optional[int] = [10, 20, 30]
a : str = len(__UpperCAmelCase)
self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220)
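# Minimal 0/1 knapsack matching the k.knapsack(capacity, weights, values,
# counter) signature exercised above (hedged: naive recursion; the module
# under test may be implemented differently but should agree on these results):
def _demo_knapsack(capacity, weights, values, counter):
    if counter == 0 or capacity == 0:
        return 0
    if weights[counter - 1] > capacity:
        return _demo_knapsack(capacity, weights, values, counter - 1)
    return max(
        values[counter - 1]
        + _demo_knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        _demo_knapsack(capacity, weights, values, counter - 1),
    )
# _demo_knapsack(50, [10, 20, 30], [60, 100, 120], 3) -> 220, as asserted above.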
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : Optional[Any] = logging.get_logger(__name__)
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase=False ):
"""simple docstring"""
A_ : Optional[Any] = []
# fmt: off
# stem:
rename_keys.append(('cls_token', 'vit.embeddings.cls_token') )
rename_keys.append(('pos_embed', 'vit.embeddings.position_embeddings') )
rename_keys.append(('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias') )
# backbone
rename_keys.append(('patch_embed.backbone.stem.conv.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.weight', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight') )
rename_keys.append(('patch_embed.backbone.stem.norm.bias', 'vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight""") )
rename_keys.append((f"""patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias""", f"""vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias""") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
('pre_logits.fc.weight', 'pooler.dense.weight'),
('pre_logits.fc.bias', 'pooler.dense.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
# fmt: on
return rename_keys
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
A_ : List[str] = ''
else:
A_ : Dict = 'vit.'
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : str = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
A_ : List[Any] = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
A_ : List[Any] = in_proj_weight[
: config.hidden_size, :
]
A_ : Tuple = in_proj_bias[: config.hidden_size]
A_ : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
A_ : Tuple = in_proj_bias[-config.hidden_size :]
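# Numpy sketch of the fused-QKV split performed above (hedged: toy hidden
# size): timm stores the attention input projection as one
# (3 * hidden, hidden) matrix, while the HF model wants separate
# query/key/value weights, so the conversion slices consecutive row blocks.
def _demo_split_qkv(hidden=4):
    import numpy as np

    qkv = np.arange(3 * hidden * hidden).reshape(3 * hidden, hidden)
    q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
    assert q.shape == k.shape == v.shape == (hidden, hidden)
    return q, k, v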
def UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A_ : List[str] = ['head.weight', 'head.bias']
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Any = dct.pop(_UpperCAmelCase )
A_ : Optional[int] = val
def UpperCAmelCase__ ( ):
"""simple docstring"""
A_ : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A_ : int = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
"""simple docstring"""
A_ : List[Any] = BitConfig(
global_padding='same' , layer_type='bottleneck' , depths=(3, 4, 9) , out_features=['stage3'] , embedding_dynamic_padding=_UpperCAmelCase , )
A_ : Optional[int] = ViTHybridConfig(backbone_config=_UpperCAmelCase , image_size=384 , num_labels=1000 )
A_ : Union[str, Any] = False
# load original model from timm
A_ : List[Any] = timm.create_model(_UpperCAmelCase , pretrained=_UpperCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A_ : Tuple = timm_model.state_dict()
if base_model:
remove_classification_head_(_UpperCAmelCase )
A_ : Any = create_rename_keys(_UpperCAmelCase , _UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
A_ : Union[str, Any] = 'huggingface/label-files'
A_ : Dict = 'imagenet-1k-id2label.json'
A_ : List[str] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type='dataset' ) , 'r' ) )
A_ : str = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : Optional[int] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
A_ : List[Any] = ViTHybridModel(_UpperCAmelCase ).eval()
else:
A_ : str = ViTHybridForImageClassification(_UpperCAmelCase ).eval()
model.load_state_dict(_UpperCAmelCase )
# create image processor
A_ : Dict = create_transform(**resolve_data_config({} , model=_UpperCAmelCase ) )
A_ : List[str] = transform.transforms
A_ : List[str] = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
A_ : Tuple = ViTHybridImageProcessor(
do_resize=_UpperCAmelCase , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_UpperCAmelCase , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=_UpperCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
A_ : Optional[Any] = prepare_img()
A_ : Any = transform(_UpperCAmelCase ).unsqueeze(0 )
A_ : Dict = processor(_UpperCAmelCase , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(_UpperCAmelCase , _UpperCAmelCase )
# verify logits
with torch.no_grad():
A_ : List[Any] = model(_UpperCAmelCase )
A_ : List[str] = outputs.logits
print('Predicted class:' , logits.argmax(-1 ).item() )
if base_model:
A_ : Union[str, Any] = timm_model.forward_features(_UpperCAmelCase )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(_UpperCAmelCase , outputs.pooler_output , atol=1E-3 )
else:
A_ : Tuple = timm_model(_UpperCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_UpperCAmelCase , outputs.logits , atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCAmelCase )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
print(f"""Pushing model and processor to the hub {vit_name}""" )
model.push_to_hub(f"""ybelkada/{vit_name}""" )
processor.push_to_hub(f"""ybelkada/{vit_name}""" )
if __name__ == "__main__":
lowerCamelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
lowerCamelCase_ : List[str] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=99 , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=512 , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=0.02 , lowerCamelCase_=4 , ):
"""simple docstring"""
a = parent
a = batch_size
a = seq_length
a = is_training
a = use_attention_mask
a = use_token_type_ids
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = max_position_embeddings
a = type_vocab_size
a = type_sequence_label_size
a = initializer_range
a = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
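# Usage sketch (hypothetical test, not part of this module): each require_*
# helper above is a decorator that skips a test when its backend is unavailable.
#
#   @require_cuda
#   def test_regression_model_on_gpu():
#       model = RegressionModel()
#       ...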
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : List[str] = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases): the original checkpoint
        # stores them as a single fused qkv matrix, which is split into thirds here
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
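# Quick sanity check of the split above (hypothetical tensor, hidden_size=4):
# the fused qkv matrix has shape (3 * hidden_size, hidden_size), and the three
# slices stacked back together reproduce it exactly.
#
#   qkv = torch.randn(3 * 4, 4)
#   q, k, v = qkv[:4, :], qkv[4:8, :], qkv[-4:, :]
#   assert torch.equal(torch.cat([q, k, v]), qkv)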
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"  #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
if "trocr-base-handwritten" in checkpoint_url:
A_ : Union[str, Any] = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
A_ : str = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
A_ : Optional[Any] = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
A_ : Optional[int] = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _lowerCAmelCase , atol=1e-3 ), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
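# Example invocation (the script filename and output folder are illustrative):
#
#   python convert_trocr_unilm_to_pytorch.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten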
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=16384, type_vocab_size=16, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=0xE000, eos_token_id=0xE001, downsampling_rate=4, upsampling_kernel_size=4, num_hash_functions=8, num_hash_buckets=16384, local_transformer_stride=128, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
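# Minimal usage sketch (assumes transformers provides CanineModel; the defaults
# above correspond to google/canine-s, so a bare CanineConfig() reproduces it):
#
#   from transformers import CanineConfig, CanineModel
#   config = CanineConfig()
#   model = CanineModel(config)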
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)
    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range)
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
        result = model(multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
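# Standalone sketch of the integration check above (assumes network access to
# download the microsoft/mpnet-base weights):
#
#   model = MPNetModel.from_pretrained("microsoft/mpnet-base")
#   out = model(torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]))[0]
#   print(out.shape)  # torch.Size([1, 11, 768])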
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )
    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=f'{MODEL}/checkpoint_500000',
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
    args = parser.parse_args()
main(args)
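# Example invocation (the script filename is illustrative):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path /path/to/base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion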
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
'''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
''' body.''',
'''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
''' punishment.''',
]
]
TGT = [
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
'''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
''' Israelis .''',
'''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
''' sentences up by 28% .''',
]
def test_disaggregated_scores_are_determinstic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
        '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
    ]
    tgt = [
        '''Margot Frank, died in 1945, a month earlier than previously thought.''',
        '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
        ''' the final seconds on board Flight 9525.''',
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
    ]
    tgt = [
        ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
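# Minimal sketch of the calculate_rouge API exercised above (a hedged example,
# not part of the original test file):
#
#   scores = calculate_rouge(PRED, TGT, rouge_keys=["rouge2", "rougeL"])
#   print(scores["rouge2"])  # aggregated score with low/mid/high intervals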
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase__ = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs", metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
def lowercase__ ( self ):
"""simple docstring"""
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
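        # the reloaded index answers the same query identically, i.e. save()/load() round-trips the raw faiss index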
@require_faiss
def test_serialization_fs(mockfs):
    '''simple docstring'''
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTestCase(TestCase):
    """simple docstring"""
    def test_elasticsearch(self):
        """simple docstring"""
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])
            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
| 108
|
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    '''simple docstring'''
    def setUp(self):
        '''simple docstring'''
        self.tool = load_tool("text-to-speech")
        self.tool.setup()
    def test_exact_match_arg(self):
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
    def test_exact_match_kwarg(self):
'''simple docstring'''
# SpeechT5 isn't deterministic
torch.manual_seed(0 )
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
| 134
| 0
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'
REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act)
        self.num_channels = config.num_channels
    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        # squeeze-and-excitation: pool to (1, 1), score each channel with a 1x1-conv bottleneck + sigmoid
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1), nn.ReLU(), nn.Conv2d(reduced_channels, in_channels, kernel_size=1), nn.Sigmoid())
    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
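# RegNetXLayer below is a ResNeXt-like bottleneck: 1x1 reduce, 3x3 grouped conv, then a 1x1
# projection with no activation, plus an identity/1x1 shortcut around the whole block.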
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACT2FN[config.hidden_act]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None), )
        self.activation = ACT2FN[config.hidden_act]
    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
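# RegNetYLayer above is RegNetXLayer plus a squeeze-and-excitation block before the final 1x1
# projection; RegNetStage below stacks these layers, downsampling only in its first layer.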
class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config, in_channels, out_channels, stride=stride, ), *[layer(config, out_channels, out_channels) for _ in range(depth - 1)], )
    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], ) )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))
    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = R'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.', REGNET_START_DOCSTRING, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, )
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''', REGNET_START_DOCSTRING, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(), nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(), )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 93
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
lowerCAmelCase = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
lowerCAmelCase = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
lowerCAmelCase = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
    def _download_and_prepare(self, dl_manager):
"""simple docstring"""
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        """simple docstring"""
        if NLTK_VERSION >= version.Version('3.6.5'):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]
        return {"meteor": np.mean(scores)}
| 93
| 1
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : Tuple = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , a__="</s>" , a__="<unk>" , a__="<pad>" , a__=125 , a__=None , **a__ , ):
if extra_ids > 0 and additional_special_tokens is None:
_lowerCAmelCase : Optional[int] = [F"<extra_id_{i}>" for i in range(lowerCamelCase__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
_lowerCAmelCase : int = len(set(filter(lambda a__ : bool("""extra_id""" in str(lowerCamelCase__ ) ) , lowerCamelCase__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
_lowerCAmelCase : List[str] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
_lowerCAmelCase : int = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
_lowerCAmelCase : str = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
super().__init__(
eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , extra_ids=lowerCamelCase__ , additional_special_tokens=lowerCamelCase__ , **lowerCamelCase__ , )
_lowerCAmelCase : List[str] = extra_ids
_lowerCAmelCase : List[Any] = 2**8 # utf is 8 bits
# define special tokens dict
_lowerCAmelCase : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
_lowerCAmelCase : Tuple = len(self.special_tokens_encoder )
_lowerCAmelCase : List[str] = len(lowerCamelCase__ )
for i, token in enumerate(lowerCamelCase__ ):
_lowerCAmelCase : Optional[int] = self.vocab_size + i - n
_lowerCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
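    # vocab layout: 3 special tokens (pad/eos/unk) first, then the 256 byte values, then the extra_id sentinels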
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text):
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens
    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id
    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token
    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        return ()
| 44
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    """simple docstring"""
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
"""simple docstring"""
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
    def prepare_config_and_inputs(self):
"""simple docstring"""
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
return (
config,
input_ids,
input_mask,
head_mask,
)
    def get_config(self):
"""simple docstring"""
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True)
    def prepare_config_and_inputs_for_common(self):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
    def test_resize_token_embeddings(self):
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
"""simple docstring"""
@slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        """simple docstring"""
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
@slow
    def test_xglm_sample(self):
        """simple docstring"""
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tf.random.set_seed(0)
        tokenized = tokenizer('Today is a nice day and', return_tensors='tf')
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
@slow
    def test_batch_generation(self):
        """simple docstring"""
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        tokenizer.padding_side = 'left'
        # use different length sentences to test batching
        sentences = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'], max_new_tokens=12)
        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 71
| 0
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
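# A minimal usage sketch (not part of the original module; the checkpoint name is an assumption):
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   encoding = processor(images=image, text="a photo", return_tensors="pt")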
| 365
|
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
__A = yaml.safe_load(
'\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n'
)
__A = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = {
'name': 'root',
'text': '',
'is_empty_text': True,
'subsections': [
{
'name': 'Dataset Card for My Dataset',
'text': '',
'is_empty_text': True,
'subsections': [
{'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []},
{
'name': 'Dataset Description',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Dataset Summary',
'text': 'Some text here.',
'is_empty_text': False,
'subsections': [
{
'name': 'Extra Ignored Subsection',
'text': '',
'is_empty_text': True,
'subsections': [],
}
],
},
{
'name': 'Supported Tasks and Leaderboards',
'text': '',
'is_empty_text': True,
'subsections': [],
},
{'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []},
],
},
],
}
],
}
__A = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = (
'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'
)
__A = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = (
'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'
)
__A = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'
__A = ''
__A = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'
__A = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n'
__A = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
assert ReadMe.from_string(_lowercase , _lowercase ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
with pytest.raises(_lowercase , match=re.escape(expected_error.format(path='''root''' ) ) ):
_A = ReadMe.from_string(_lowercase , _lowercase )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
with pytest.raises(_lowercase , match=re.escape(expected_error.format(path='''root''' ) ) ):
ReadMe.from_string(_lowercase , _lowercase )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def __A ( _lowercase ):
'''simple docstring'''
ReadMe.from_string(_lowercase , _lowercase , suppress_parsing_errors=_lowercase )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_A = Path(_lowercase ) / '''README.md'''
with open(_lowercase , '''w+''' ) as readme_file:
readme_file.write(_lowercase )
_A = ReadMe.from_readme(_lowercase , _lowercase ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_A = Path(_lowercase ) / '''README.md'''
with open(_lowercase , '''w+''' ) as readme_file:
readme_file.write(_lowercase )
_A = expected_error.format(path=_lowercase )
with pytest.raises(_lowercase , match=re.escape(_lowercase ) ):
_A = ReadMe.from_readme(_lowercase , _lowercase )
readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_A = Path(_lowercase ) / '''README.md'''
with open(_lowercase , '''w+''' ) as readme_file:
readme_file.write(_lowercase )
_A = expected_error.format(path=_lowercase )
with pytest.raises(_lowercase , match=re.escape(_lowercase ) ):
ReadMe.from_readme(_lowercase , _lowercase )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def __A ( _lowercase ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
_A = Path(_lowercase ) / '''README.md'''
with open(_lowercase , '''w+''' ) as readme_file:
readme_file.write(_lowercase )
ReadMe.from_readme(_lowercase , _lowercase , suppress_parsing_errors=_lowercase )
| 75
| 0
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}), supervised_keys=None, )
    def _split_generators(self, dl_manager, pipeline):
        """simple docstring"""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]
    def _build_pcollection(self, pipeline, examples):
        """simple docstring"""
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}), supervised_keys=None, )
    def _split_generators(self, dl_manager, pipeline):
        """simple docstring"""
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]
    def _build_pcollection(self, pipeline, examples):
        """simple docstring"""
        import apache_beam as beam
        return pipeline | "Load Examples" >> beam.Create(examples)
def _UpperCAmelCase ( ):
"""simple docstring"""
return [(i, {"content": content}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
def _UpperCAmelCase ( ):
"""simple docstring"""
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["""foo""", """bar""", """foobar"""] )]
class BeamBuilderTest(TestCase):
@require_beam
    def test_download_and_prepare(self):
        """simple docstring"""
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", F'{builder.name}-train.arrow')))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
@require_beam
    def test_download_and_prepare_sharded(self):
        """simple docstring"""
        import apache_beam as beam
        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", F'{builder.name}-train-00000-of-00002.arrow')))
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", F'{builder.name}-train-00001-of-00002.arrow')))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
@require_beam
    def test_no_beam_options(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)
@require_beam
    def test_nested_features(self):
        """simple docstring"""
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", F'{builder.name}-train.arrow')))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab_tokens = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab = dict(zip(vocab_tokens, range(len(vocab_tokens))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
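    # Note (added comment): with METRIC_INNER_PRODUCT, an all-ones query vector scores highest
    # against the doc whose embedding is 2 * ones (id "1"), which is exactly what the retrieve()
    # tests below assert.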
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check that the retriever output consists of 6 attributes, including the tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc-token-related keys in the output dict
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
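# Usage sketch (added; a minimal example assuming the usual transformers workflow, kept as a
# comment so importing this module stays side-effect free):
#
#     from transformers import GLPNConfig, GLPNModel
#     config = GLPNConfig()        # glpn-kitti-style defaults as defined above
#     model = GLPNModel(config)    # randomly initialized model built from the config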
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n by trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
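# Sanity checks (added, hand-verified): 13195 = 5 * 7 * 13 * 29, so solution(13195) == 29; the
# default input 600851475143 yields 6857 (Project Euler problem 3).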
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE with the modified Euler (Heun) method: predict with a forward Euler
    step, then correct with the average of the slopes at both ends of the interval."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
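# Usage sketch (added, illustrative): for dy/dx = y with y(0) = 1 the exact solution is e**x, so
#
#     f = lambda x, y: y
#     ys = euler_modified(f, 1.0, 0.0, 0.1, 1.0)
#
# leaves ys[-1] close to e = 2.718... (the error shrinks as the step size does).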
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy solution to the fractional knapsack problem: take items in decreasing
    value/weight order, splitting the last item that does not fit whole."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
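# Worked example (added, hand-checked): with value=[60, 100, 120], weight=[10, 20, 30] and
# capacity=50, the greedy order by value/weight is item 0, 1, 2; items 0 and 1 fit whole and
# 20/30 of item 2 is taken, so fractional_knapsack returns (240.0, [1, 1, 2/3]).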
if __name__ == "__main__":
import doctest
doctest.testmod()
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one of the two binary inputs is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the OR truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class LinearAlgebraTest(unittest.TestCase):
    def test_component(self):
        """test for the Vector.component() method"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        """test for Vector.__str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        """test for Vector.__len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        """test for the Euclidean length"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        """test for the + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        """test for the - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        """test for the * operator (scaling and dot product)"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        """test for the global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        """test for the global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        """test for the global function axpy() (a*x + y)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        """test for Vector.copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))
    def test_change_component_vector(self):
        """test for Vector.change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        """test for Matrix.__str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        """test for Matrix.minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        """test for Matrix.cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        """test for Matrix.determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        """test for the * operator on matrices"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        """test for Matrix.change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        """test for Matrix.component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        # the original passed 0.01 as assertEqual's message argument; a tolerance was intended
        self.assertAlmostEqual(a.component(2, 1), 7, delta=0.01)

    def test_add_matrix(self):
        """test for the + operator on matrices"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        """test for the - operator on matrices"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        """test for the global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )
if __name__ == "__main__":
unittest.main()
'''simple docstring'''
def hamming(n_element: int) -> list:
    """Return the first n_element terms of the Hamming (5-smooth) number series."""
    n_element = int(n_element)
    if n_element < 1:
        raise ValueError("n_element should be a positive number")
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
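# Quick check (added, hand-verified): the series starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so
# hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12].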
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
print("-----------------------------------------------------")
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/m2m100_418M": 1_0_2_4,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "en" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "ro" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
lowercase_ : Optional[Any] = src_lang
lowercase_ : List[str] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase_ : Tuple = src_lang
lowercase_ : Any = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = self.get_lang_id(__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = tgt_lang_id
return inputs
def _snake_case ( self ):
"""simple docstring"""
self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
"""simple docstring"""
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = self.get_lang_token(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = self.lang_token_to_id[lang_token]
lowercase_ : Optional[Any] = [self.cur_lang_id]
lowercase_ : Union[str, Any] = [self.eos_token_id]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : Any = self.get_lang_token(__SCREAMING_SNAKE_CASE )
lowercase_ : Any = self.lang_token_to_id[lang_token]
lowercase_ : str = [self.cur_lang_id]
lowercase_ : List[str] = [self.eos_token_id]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return self.lang_code_to_token[lang]
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = self.get_lang_token(__SCREAMING_SNAKE_CASE )
return self.lang_token_to_id[lang_token]
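# Usage sketch (added; mirrors the documented M2M100 translation workflow, kept as a comment):
#
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     batch = tokenizer("Hello world", return_tensors="pt")
#     # input_ids begin with the "__en__" language token and end with </s>, per
#     # set_src_lang_special_tokens() above.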
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
'''simple docstring'''
def gcd(a, b):
    """Greatest common divisor via the Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a, m):
    """Modular multiplicative inverse of a modulo m (extended Euclidean algorithm)."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
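# Quick check (added, hand-verified): find_mod_inverse(3, 11) == 4, since 3 * 4 = 12 ≡ 1 (mod 11).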
'''simple docstring'''
from __future__ import annotations
def find_max(nums, left, right):
    """Divide-and-conquer maximum of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
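# Usage sketch (added, illustrative): find_max([2, 8, 5, 3], 0, 3) returns 8; the range is split
# at its midpoint until single elements remain, and the two halves' maxima are compared on the
# way back up.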
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run doctests on every file in `directory` matching the given identifier filters."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_examples(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()


@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of `func` (given as a string in the variable x) near the starting point a."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
# Find root of polynomial
print(f'The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}')
# Find Square Root of 5
print(f'The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}')
# Exponential Roots
print(f'The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}')
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
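# Usage sketch (added; assumes the standard transformers config/ONNX-export workflow):
#
#     config = TableTransformerConfig()                # table-transformer-detection-style defaults
#     onnx_config = TableTransformerOnnxConfig(config)
#     list(onnx_config.inputs)                         # ["pixel_values", "pixel_mask"]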
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoBERTa tokenizer (backed by the `tokenizers` library), derived from the GPT-2 BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace",
                 bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>",
                 pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors,
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token,
            unk_token=unk_token, pad_token=pad_token, mask_token=mask_token,
            add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        """Mask token, logging an error and returning None if it has not been set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
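# A hedged usage sketch of the fast tokenizer above (assumes network access to the
# "roberta-base" checkpoint; nothing below is part of the library module itself):
if __name__ == "__main__":
    tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
    # add_prefix_space=True is required for pre-tokenized (word-split) inputs,
    # see the assertion in _batch_encode_plus above.
    encoding = tokenizer(["Hello", "world"], is_split_into_words=True)
    print(encoding.input_ids)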
| 217
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler,
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler,
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler,
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator,
            num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
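# Hedged usage sketch of the upscaler exercised above, outside the test harness
# (assumes a CUDA device and network access; the blank input image is a placeholder):
if __name__ == "__main__":
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    ).to("cuda")
    low_res = Image.new("RGB", (128, 128))  # substitute a real photo here
    # the output is always 4x the input resolution, here 512x512
    upscaled = pipe(prompt="a photo", image=low_res, num_inference_steps=20).images[0]
    upscaled.save("upscaled.png")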
| 217
| 1
|
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        """PyTorch dataset wrapping the cached HANS features."""

        features: List[InputFeatures]

        def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str,
                     max_seq_length: Optional[int] = None, overwrite_cache=False, evaluate: bool = False):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        """TensorFlow dataset wrapping the HANS features via a generator."""

        features: List[InputFeatures]

        def __init__(self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str,
                     max_seq_length: Optional[int] = 128, overwrite_cache=False, evaluate: bool = False):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """Converts a list of `InputExample`s into a list of `InputFeatures`."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
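# Hedged usage sketch (the data directory and checkpoint name are illustrative;
# the HANS files come from https://github.com/tommccoy1/hans):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    dataset = HansDataset("/path/to/hans", tokenizer, task="hans", max_seq_length=128, evaluate=True)
    print(len(dataset), dataset.get_labels())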
| 362
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 23
| 0
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer
    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02,
            beta_schedule="linear", clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02,
            beta_schedule="linear", clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
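# Hedged sketch of the invariant the test above exercises: with matching beta
# schedules, DDPMScheduler.add_noise and DDIMScheduler.add_noise both compute
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, so the noisy
# batches fed to the model are identical. All names below are local to the sketch.
if __name__ == "__main__":
    ddpm = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    ddim = DDIMScheduler(num_train_timesteps=1000, beta_schedule="linear")
    x0 = torch.randn(2, 3, 8, 8)
    eps = torch.randn_like(x0)
    t = torch.tensor([10, 500])
    assert torch.allclose(ddpm.add_noise(x0, eps, t), ddim.add_noise(x0, eps, t), atol=1e-6)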
| 204
|
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
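# Hedged usage sketch of the pipeline above (runs outside this module; assumes network
# access, and the checkpoint name and label below are illustrative):
#
#     import torch
#     from diffusers import DiTPipeline
#
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
#     class_ids = pipe.get_label_ids(["golden retriever"])  # keys of pipe.labels
#     generator = torch.manual_seed(33)
#     image = pipe(class_labels=class_ids, guidance_scale=4.0, generator=generator).images[0]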
| 204
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """
    Replaces the key by subtracting the offset from the original layer number
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """We will verify our results on an image of cute cats."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original PoolFormer weights to our PoolFormer structure.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
UpperCAmelCase = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
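# Hedged illustration of the offset rule implemented in replace_key_with_offset:
# block indices are shifted down by the running count of patch-embedding layers.
# The key below is a made-up example, not one taken from a real checkpoint:
#
#     replace_key_with_offset("poolformer.encoder.2.0.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#     # -> "poolformer.encoder.block.1.0.output.conv1.weight"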
| 366
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4,
                 depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128],
                 downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True,
                 use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
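# Hedged usage sketch of the post-processing exercised above (runs outside this test
# module; assumes network access, and the local image path is illustrative):
#
#     from PIL import Image
#     from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor
#
#     processor = SegformerImageProcessor()
#     model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#     inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
#     outputs = model(**inputs)
#     # one class id per pixel, resized back to the original image size
#     seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])[0]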
| 267
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    """simple docstring"""
    model_type = '''vit_mae'''
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
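# Hedged usage sketch for the config class above; `ViTMAEModel` is assumed to
# be the released `transformers` model class that consumes this config.
from transformers import ViTMAEModel
vit_mae_config = ViTMAEConfig(mask_ratio=0.75, norm_pix_loss=False)
vit_mae_model = ViTMAEModel(vit_mae_config)  # randomly-initialized encoder built from the config
print(vit_mae_config.hidden_size, vit_mae_config.decoder_hidden_size)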
| 323
|
def _print_dist(dist, v):
    """simple docstring"""
    print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''')
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float('''inf'''):
                print(int(dist[i][j]), end='''\t''')
            else:
                print('''INF''', end='''\t''')
        print()
def floyd_warshall(graph, v):
    """simple docstring"""
    dist = [[float('''inf''') for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float('''inf''')
                    and dist[k][j] != float('''inf''')
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    v = int(input('''Enter number of vertices: '''))
    e = int(input('''Enter number of edges: '''))
    graph = [[float('''inf''') for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('''\nEdge ''', i + 1)
        src = int(input('''Enter source:'''))
        dst = int(input('''Enter destination:'''))
        weight = float(input('''Enter weight:'''))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
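# Hedged, non-interactive sketch of the same run as the transcript above
# (no input() calls needed; the graph below encodes the two example edges):
INF = float("inf")
example_graph = [
    [0.0, INF, INF],
    [INF, 0.0, 2.0],
    [INF, 1.0, 0.0],
]
# Prints the matrix shown in the expected output and returns (dist, v).
example_dist, _ = floyd_warshall(example_graph, 3)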
| 279
| 0
|
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def get_test_pipeline(self, model, tokenizer, processor):
    '''simple docstring'''
    example_video_filepath = hf_hub_download(
        repo_id='''nateraw/video-demo''', filename='''archery.mp4''', repo_type='''dataset''')
    video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
    examples = [
        example_video_filepath,
        """https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""",
    ]
    return video_classifier, examples
def run_pipeline_test(self, video_classifier, examples):
    '''simple docstring'''
    for example in examples:
        outputs = video_classifier(example)
        self.assertEqual(
            outputs, [
                {'''score''': ANY(float), '''label''': ANY(str)},
                {'''score''': ANY(float), '''label''': ANY(str)},
            ], )
@require_torch
def test_small_model_pt(self):
    '''simple docstring'''
    small_model = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification"""
    small_feature_extractor = VideoMAEFeatureExtractor(
        size={'''shortest_edge''': 10}, crop_size={'''height''': 10, '''width''': 10})
    video_classifier = pipeline(
        '''video-classification''', model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)
    video_file_path = hf_hub_download(repo_id='''nateraw/video-demo''', filename='''archery.mp4''', repo_type='''dataset''')
    outputs = video_classifier(video_file_path, top_k=2)
    self.assertEqual(
        nested_simplify(outputs, decimals=4), [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], )
    outputs = video_classifier(
        [
            video_file_path,
            video_file_path,
        ], top_k=2, )
    self.assertEqual(
        nested_simplify(outputs, decimals=4), [
            [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
            [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}],
        ], )
@require_tf
def test_large_model_tf(self):
'''simple docstring'''
pass
| 370
|
"""simple docstring"""
def logical_left_shift(number: int, shift_amount: int) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''')
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int, shift_amount: int) -> str:
    """simple docstring"""
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''')
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """simple docstring"""
    if number >= 0:  # Get binary representation of positive number
        binary_number = '''0''' + str(bin(number)).strip('''-''')[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            '''1''' + '''0''' * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
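# Hedged usage sketch for the three shift helpers above (values traced by hand):
print(logical_left_shift(13, 2))       # '0b110100' (0b1101 with two zeros appended)
print(logical_right_shift(13, 2))      # '0b11'     (two low bits dropped)
print(arithmetic_right_shift(-13, 2))  # '0b11100'  (sign bit replicated; -4 in 5-bit two's complement)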
| 100
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """dpt"""
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.")
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 57
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_speech_to_text'] = ['Speech2TextTokenizer']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_speech_to_text'] = ['Speech2TextFeatureExtractor']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_speech_to_text'] = [
    'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
    'TFSpeech2TextForConditionalGeneration',
    'TFSpeech2TextModel',
    'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_speech_to_text'] = [
    'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
    'Speech2TextForConditionalGeneration',
    'Speech2TextModel',
    'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
    TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
    TFSpeech2TextForConditionalGeneration,
    TFSpeech2TextModel,
    TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
    SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
    Speech2TextForConditionalGeneration,
    Speech2TextModel,
    Speech2TextPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 155
| 0
|
'''simple docstring'''
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit: int = 99_99_66_66_33_33) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
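# Hedged sanity check of the sieve on a small bound (the full solution() run
# over the default limit is expensive):
print(prime_sieve(30))  # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]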
| 357
|
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        '''simple docstring'''
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = """pt"""
        self.framework_tf = """tf"""
def _setup_pt_ckpt(self, model_path):
    '''simple docstring'''
    model_pt = AutoModel.from_pretrained(self.test_model)
    model_pt.save_pretrained(model_path)
def _setup_tf_ckpt(self, model_path):
    '''simple docstring'''
    model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
    model_tf.save_pretrained(model_path)
def test_framework_provided(self):
'''simple docstring'''
A_ = """mock_framework"""
# Framework provided - return whatever the user provides
A_ = FeaturesManager.determine_framework(self.test_model , UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
    self._setup_pt_ckpt(local_pt_ckpt)
    framework = FeaturesManager.determine_framework(local_pt_ckpt, self.framework_pt)
    self.assertEqual(framework, self.framework_pt)
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
    self._setup_tf_ckpt(local_tf_ckpt)
    framework = FeaturesManager.determine_framework(local_tf_ckpt, self.framework_tf)
    self.assertEqual(framework, self.framework_tf)
def test_checkpoint_provided(self):
'''simple docstring'''
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
    self._setup_pt_ckpt(local_pt_ckpt)
    framework = FeaturesManager.determine_framework(local_pt_ckpt)
    self.assertEqual(framework, self.framework_pt)
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
    self._setup_tf_ckpt(local_tf_ckpt)
    framework = FeaturesManager.determine_framework(local_tf_ckpt)
    self.assertEqual(framework, self.framework_tf)
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
    with self.assertRaises(OSError):
        framework = FeaturesManager.determine_framework(local_invalid_ckpt)
def test_from_environment(self):
'''simple docstring'''
mock_tf_available = MagicMock(return_value=False)
with patch("""transformers.onnx.features.is_tf_available""", mock_tf_available):
    framework = FeaturesManager.determine_framework(self.test_model)
    self.assertEqual(framework, self.framework_pt)
# PyTorch not in environment -> use TensorFlow
mock_torch_available = MagicMock(return_value=False)
with patch("""transformers.onnx.features.is_torch_available""", mock_torch_available):
    framework = FeaturesManager.determine_framework(self.test_model)
    self.assertEqual(framework, self.framework_tf)
# Both in environment -> use PyTorch
mock_tf_available = MagicMock(return_value=True)
mock_torch_available = MagicMock(return_value=True)
with patch("""transformers.onnx.features.is_tf_available""", mock_tf_available), patch(
    """transformers.onnx.features.is_torch_available""", mock_torch_available):
    framework = FeaturesManager.determine_framework(self.test_model)
    self.assertEqual(framework, self.framework_pt)
# Both not in environment -> raise error
mock_tf_available = MagicMock(return_value=False)
mock_torch_available = MagicMock(return_value=False)
with patch("""transformers.onnx.features.is_tf_available""", mock_tf_available), patch(
    """transformers.onnx.features.is_torch_available""", mock_torch_available):
    with self.assertRaises(EnvironmentError):
        framework = FeaturesManager.determine_framework(self.test_model)
| 101
| 0
|
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def setUp(self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname )
def test_convert_token_and_id(self):
'''simple docstring'''
UpperCAmelCase = '''<pad>'''
UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def test_get_vocab(self):
'''simple docstring'''
vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(lowercase ) , 1_008 )
def test_vocab_size(self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def test_full_tokenizer(self):
'''simple docstring'''
tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize('''This is a test''')
self.assertListEqual(tokens, ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
    tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
    tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
    ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
    back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def big_tokenizer(self):
'''simple docstring'''
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def test_picklable_without_disk(self):
'''simple docstring'''
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(SAMPLE_VOCAB, f.name)
tokenizer = XGLMTokenizer(f.name, keep_accents=True)
pickled_tokenizer = pickle.dumps(tokenizer)
pickle.loads(pickled_tokenizer)
def test_rust_and_python_full_tokenizers(self):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
tokenizer = self.get_tokenizer()
rust_tokenizer = self.get_rust_tokenizer()
sequence = '''I was born in 92000, and this is falsé.'''
tokens = tokenizer.tokenize(sequence)
rust_tokens = rust_tokenizer.tokenize(sequence)
self.assertListEqual(tokens, rust_tokens)
ids = tokenizer.encode(sequence, add_special_tokens=False)
rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
self.assertListEqual(ids, rust_ids)
rust_tokenizer = self.get_rust_tokenizer()
ids = tokenizer.encode(sequence)
rust_ids = rust_tokenizer.encode(sequence)
self.assertListEqual(ids, rust_ids)
@slow
def test_tokenization_base_easy_symbols(self):
'''simple docstring'''
text = '''Hello World!'''
expected_encoding = [2, 31_227, 4_447, 35]
self.assertListEqual(expected_encoding, self.big_tokenizer.encode(text))
@slow
def test_tokenization_base_hard_symbols(self):
'''simple docstring'''
text = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
expected_encoding = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(expected_encoding, self.big_tokenizer.encode(text))
@slow
def test_tokenizer_integration(self):
'''simple docstring'''
expected_encoding = {
'''input_ids''': [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
    expected_encoding=expected_encoding, model_name='''facebook/xglm-564M''', padding=False, )
| 34
|
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect('''equal''')
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
| 23
| 0
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """simple docstring"""
    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs)
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)
    def read(self):
        '''simple docstring'''
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
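# Hedged usage sketch (requires `pyspark`). The public entry point in the
# `datasets` library is Dataset.from_spark, which wraps a reader like the above;
# the session setup below is illustrative.
from pyspark.sql import SparkSession
from datasets import Dataset
spark = SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
spark_df = spark.createDataFrame([(0, "a"), (1, "b")], schema="id int, text string")
dataset = Dataset.from_spark(spark_df)  # materializes the DataFrame as an Arrow-backed dataset
print(dataset)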
| 282
|
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    '''Returns the (k, o, q, v) attention kernels for a given layer.'''
    k = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
    o = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
    q = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
    v = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    '''Returns the MLP kernels; v1.1 checkpoints split wi into wi_0/wi_1.'''
    if split_mlp_wi:
        wi_0 = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
        wi_1 = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
        wi = (wi_0, wi_1)
    else:
        wi = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
    wo = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    '''Returns the layer norm scale for a given layer.'''
    return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    '''Converts the parameters from T5X-Flax to Transformers-PyTorch.'''
    old = traverse_util.flatten_dict(variables['target'])
    old = {'/'.join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = 'encoder/layers_0/mlp/wi_0/kernel' in old
    print('Split MLP:', split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new['shared.weight'] = old['token_embedder/embedding']
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, 'encoder', 'pre_attention_layer_norm')
        k, o, q, v = t5x_attention_lookup(old, i, 'encoder', 'attention')
        new[f'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[f'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, 'encoder', 'pre_mlp_layer_norm')
        wi, wo = t5x_mlp_lookup(old, i, 'encoder', split_mlp_wi)
        new[f'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[f'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T
    new['encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = old[
        'encoder/relpos_bias/rel_embedding'
    ].T
    new['encoder.final_layer_norm.weight'] = old['encoder/encoder_norm/scale']
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_self_attention_layer_norm')
            k, o, q, v = t5x_attention_lookup(old, i, 'decoder', 'self_attention')
            new[f'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_cross_attention_layer_norm')
            k, o, q, v = t5x_attention_lookup(old, i, 'decoder', 'encoder_decoder_attention')
            new[f'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, 'decoder', 'pre_mlp_layer_norm')
            wi, wo = t5x_mlp_lookup(old, i, 'decoder', split_mlp_wi)
            new[f'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[f'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T
        new['decoder.final_layer_norm.weight'] = old['decoder/decoder_norm/scale']
        new['decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = old[
            'decoder/relpos_bias/rel_embedding'
        ].T
    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if 'decoder/logits_dense/kernel' in old:
        new['lm_head.weight'] = old['decoder/logits_dense/kernel'].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    '''Prepares a state dict for the PyTorch model.'''
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['encoder.embed_tokens.weight'] = state_dict['shared.weight']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['decoder.embed_tokens.weight'] = state_dict['shared.weight']
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.')
            state_dict['lm_head.weight'] = state_dict['shared.weight']
    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    '''Replaces the model's weights with those of the T5X converted checkpoint.'''
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    '''Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint.'''
    config = T5Config.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('Done')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
args = parser.parse_args()
convert_t5x_checkpoint_to_pytorch(
    args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 282
| 1
|
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode='''linear_ramp''', pad_width=overlap_pixels, end_values=0)
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))
def clamp_rect(rect, min, max):
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new('''RGB''', (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])), (0, 0), )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
'''simple docstring'''
def __init__( self : List[Any] ,_snake_case : AutoencoderKL ,_snake_case : CLIPTextModel ,_snake_case : CLIPTokenizer ,_snake_case : UNetaDConditionModel ,_snake_case : DDPMScheduler ,_snake_case : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] ,_snake_case : int = 350 ,) -> int:
"""simple docstring"""
super().__init__(
vae=__SCREAMING_SNAKE_CASE ,text_encoder=__SCREAMING_SNAKE_CASE ,tokenizer=__SCREAMING_SNAKE_CASE ,unet=__SCREAMING_SNAKE_CASE ,low_res_scheduler=__SCREAMING_SNAKE_CASE ,scheduler=__SCREAMING_SNAKE_CASE ,max_noise_level=__SCREAMING_SNAKE_CASE ,)
def UpperCAmelCase ( self : List[Any] ,_snake_case : str ,_snake_case : Tuple ,_snake_case : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Tuple ,_snake_case : List[Any] ,_snake_case : int ,**_snake_case : str ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ : int = (
min(image.size[0] - (tile_size + original_image_slice) ,x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) ,y * tile_size ),
min(image.size[0] ,(x + 1) * tile_size ),
min(image.size[1] ,(y + 1) * tile_size ),
)
lowercase__ : Union[str, Any] = add_overlap_rect(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,image.size )
lowercase__ : Union[str, Any] = image.crop(__SCREAMING_SNAKE_CASE )
lowercase__ : Any = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
lowercase__ : Dict = translated_slice_x - (original_image_slice / 2)
lowercase__ : Any = max(0 ,__SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = squeeze_tile(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
lowercase__ : int = to_input.size
lowercase__ : Tuple = to_input.resize((tile_size, tile_size) ,Image.BICUBIC )
lowercase__ : Optional[int] = super(__SCREAMING_SNAKE_CASE ,self ).__call__(image=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ).images[0]
lowercase__ : List[Any] = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) ,Image.BICUBIC )
lowercase__ : Tuple = unsqueeze_tile(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) ,Image.BICUBIC )
lowercase__ : Optional[int] = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
lowercase__ : Optional[int] = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) ,tile_border * 4 ,remove_borders=__SCREAMING_SNAKE_CASE ) ,mode='''L''' ,)
final_image.paste(
__SCREAMING_SNAKE_CASE ,(crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) ,__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __call__( self : List[str] ,_snake_case : Union[str, List[str]] ,_snake_case : Union[PIL.Image.Image, List[PIL.Image.Image]] ,_snake_case : int = 75 ,_snake_case : float = 9.0 ,_snake_case : int = 50 ,_snake_case : Optional[Union[str, List[str]]] = None ,_snake_case : Optional[int] = 1 ,_snake_case : float = 0.0 ,_snake_case : Optional[torch.Generator] = None ,_snake_case : Optional[torch.FloatTensor] = None ,_snake_case : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,_snake_case : int = 1 ,_snake_case : int = 128 ,_snake_case : int = 32 ,_snake_case : int = 32 ,) -> str:
"""simple docstring"""
lowercase__ : Dict = Image.new('''RGB''' ,(image.size[0] * 4, image.size[1] * 4) )
lowercase__ : Any = math.ceil(image.size[0] / tile_size )
lowercase__ : str = math.ceil(image.size[1] / tile_size )
lowercase__ : Tuple = tcx * tcy
lowercase__ : List[Any] = 0
for y in range(__SCREAMING_SNAKE_CASE ):
for x in range(__SCREAMING_SNAKE_CASE ):
self._process_tile(
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,prompt=__SCREAMING_SNAKE_CASE ,num_inference_steps=__SCREAMING_SNAKE_CASE ,guidance_scale=__SCREAMING_SNAKE_CASE ,noise_level=__SCREAMING_SNAKE_CASE ,negative_prompt=__SCREAMING_SNAKE_CASE ,num_images_per_prompt=__SCREAMING_SNAKE_CASE ,eta=__SCREAMING_SNAKE_CASE ,generator=__SCREAMING_SNAKE_CASE ,latents=__SCREAMING_SNAKE_CASE ,)
current_count += 1
if callback is not None:
callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
return final_image
def main():
    model_id = '''stabilityai/stable-diffusion-x4-upscaler'''
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision='''fp16''', torch_dtype=torch.float16)
    pipe = pipe.to('''cuda''')
    image = Image.open('''../../docs/source/imgs/diffusers_library.jpg''')
    def callback(obj):
        print(f"""progress: {obj["progress"]:.4f}""")
        obj["image"].save('''diffusers_library_progress.jpg''')
    final_image = pipe(image=image, prompt='''Black font, white background, vector''', noise_level=40, callback=callback)
    final_image.save('''diffusers_library.jpg''')
if __name__ == "__main__":
main()
| 16
|
'''simple docstring'''
class CircularQueue:
    """simple docstring"""
    def __init__(self, n: int):
        """simple docstring"""
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0
    def __len__(self) -> int:
        """simple docstring"""
        return self.size
    def is_empty(self) -> bool:
        """simple docstring"""
        return self.size == 0
    def first(self):
        """simple docstring"""
        return False if self.is_empty() else self.array[self.front]
    def enqueue(self, data):
        """simple docstring"""
        if self.size >= self.n:
            raise Exception("""QUEUE IS FULL""")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self
    def dequeue(self):
        """simple docstring"""
        if self.size == 0:
            raise Exception("""UNDERFLOW""")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
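# Hedged usage sketch for the fixed-capacity queue above:
queue = CircularQueue(3)
queue.enqueue(10).enqueue(20).enqueue(30)  # enqueue returns self, so calls chain
print(len(queue))       # 3
print(queue.first())    # 10
print(queue.dequeue())  # 10; front advances modulo the capacity
queue.enqueue(40)       # reuses the slot freed at the wrapped-around rear
print(queue.dequeue())  # 20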
| 267
| 0
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class Image:
    @staticmethod
    def open(*args, **kwargs):
        pass
def hashimage(image: Image) -> str:
    """simple docstring"""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image) -> Dict:
    """simple docstring"""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
model_mapping = dict(
    (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
tf_model_mapping = dict(
    (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))
def UpperCAmelCase(self : Tuple , _A : List[str] , _A : List[str] , _A : List[Any] ) -> Optional[int]:
snake_case = MaskGenerationPipeline(model=_A , image_processor=_A )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def UpperCAmelCase(self : List[str] , _A : str , _A : Tuple ) -> str:
pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
def UpperCAmelCase(self : List[str] ) -> int:
pass
@slow
@require_torch
def UpperCAmelCase(self : Optional[int] ) -> Optional[int]:
snake_case = pipeline("mask-generation" , model="facebook/sam-vit-huge" )
snake_case = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=2_5_6 )
# Shortening by hashing
snake_case = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_A ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.0_21},
{"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.00_53},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_8_0, 6_4_0)}, "scores": 0.99_67},
{"mask": {"hash": "453c7844bd", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_93},
{"mask": {"hash": "3d44f2926d", "shape": (4_8_0, 6_4_0)}, "scores": 0.99_09},
{"mask": {"hash": "64033ddc3f", "shape": (4_8_0, 6_4_0)}, "scores": 0.98_79},
{"mask": {"hash": "801064ff79", "shape": (4_8_0, 6_4_0)}, "scores": 0.98_34},
{"mask": {"hash": "6172f276ef", "shape": (4_8_0, 6_4_0)}, "scores": 0.97_16},
{"mask": {"hash": "b49e60e084", "shape": (4_8_0, 6_4_0)}, "scores": 0.96_12},
{"mask": {"hash": "a811e775fd", "shape": (4_8_0, 6_4_0)}, "scores": 0.95_99},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_8_0, 6_4_0)}, "scores": 0.95_52},
{"mask": {"hash": "9d8257e080", "shape": (4_8_0, 6_4_0)}, "scores": 0.95_32},
{"mask": {"hash": "32de6454a8", "shape": (4_8_0, 6_4_0)}, "scores": 0.95_16},
{"mask": {"hash": "af3d4af2c8", "shape": (4_8_0, 6_4_0)}, "scores": 0.94_99},
{"mask": {"hash": "3c6db475fb", "shape": (4_8_0, 6_4_0)}, "scores": 0.94_83},
{"mask": {"hash": "c290813fb9", "shape": (4_8_0, 6_4_0)}, "scores": 0.94_64},
{"mask": {"hash": "b6f0b8f606", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_43},
{"mask": {"hash": "92ce16bfdf", "shape": (4_8_0, 6_4_0)}, "scores": 0.9_43},
{"mask": {"hash": "c749b25868", "shape": (4_8_0, 6_4_0)}, "scores": 0.94_08},
{"mask": {"hash": "efb6cab859", "shape": (4_8_0, 6_4_0)}, "scores": 0.93_35},
{"mask": {"hash": "1ff2eafb30", "shape": (4_8_0, 6_4_0)}, "scores": 0.93_26},
{"mask": {"hash": "788b798e24", "shape": (4_8_0, 6_4_0)}, "scores": 0.92_62},
{"mask": {"hash": "abea804f0e", "shape": (4_8_0, 6_4_0)}, "scores": 0.89_99},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_8_0, 6_4_0)}, "scores": 0.89_86},
{"mask": {"hash": "cd24047c8a", "shape": (4_8_0, 6_4_0)}, "scores": 0.89_84},
{"mask": {"hash": "6943e6bcbd", "shape": (4_8_0, 6_4_0)}, "scores": 0.88_73},
{"mask": {"hash": "b5f47c9191", "shape": (4_8_0, 6_4_0)}, "scores": 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def UpperCAmelCase(self : int ) -> List[Any]:
snake_case = "facebook/sam-vit-huge"
snake_case = pipeline("mask-generation" , model=_A )
snake_case = image_segmenter(
"http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=2_5_6 )
# Shortening by hashing
snake_case = []
for i, o in enumerate(outputs["masks"] ):
new_outupt += [{"mask": mask_to_test_readable(_A ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_A , decimals=4 ) , [
{"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.04_44},
{"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.02_10},
{"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.01_67},
{"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.01_32},
{"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.00_53},
] , )
| 137
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_lilt"] = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 137
| 1
|
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int):
        """simple docstring"""
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTree:
    def __init__(self, tree: Node):
        """simple docstring"""
        self.tree = tree
    def depth_first_search(self, node: Node | None) -> int:
        """simple docstring"""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )
    def __iter__(self):
        """simple docstring"""
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
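# Hedged usage sketch: summing a three-node tree via the iterator above.
root = Node(10)
root.left = Node(5)
root.right = Node(-3)
print(next(iter(BinaryTree(root))))  # 12 == 10 + 5 + (-3)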
| 18
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__magic_name__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__)
| 100
| 0
|
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = """vision-encoder-decoder"""
    is_composition = True
def __init__(self, **kwargs):
    super().__init__(**kwargs)
    if "encoder" not in kwargs or "decoder" not in kwargs:
        raise ValueError(
            f'''A configuration of type {self.model_type} cannot be instantiated because '''
            f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''')
    encoder_config = kwargs.pop("""encoder""")
    encoder_model_type = encoder_config.pop("""model_type""")
    decoder_config = kwargs.pop("""decoder""")
    decoder_model_type = decoder_config.pop("""model_type""")
    self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
    self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
    self.is_encoder_decoder = True
@classmethod
def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs):
    logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""")
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True
    return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
def to_dict(self):
    output = copy.deepcopy(self.__dict__)
    output["""encoder"""] = self.encoder.to_dict()
    output["""decoder"""] = self.decoder.to_dict()
    output["""model_type"""] = self.__class__.model_type
return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse("""1.11""")
@property
def inputs(self):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def atol_for_validation(self):
return 1e-4
@property
def outputs(self):
return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    '''simple docstring'''
@property
def inputs(self):
common_inputs = OrderedDict()
common_inputs["""input_ids"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
common_inputs["""attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
common_inputs["""encoder_hidden_states"""] = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def lowerCAmelCase__ ( self : Any , snake_case_ : "PreTrainedTokenizerBase" , snake_case_ : int = -1 , snake_case_ : int = -1 , snake_case_ : bool = False , snake_case_ : Optional["TensorType"] = None , ):
import torch
UpperCamelCase_: List[Any] = OrderedDict()
UpperCamelCase_: Tuple = super().generate_dummy_inputs(
snake_case_ , batch_size=snake_case_ , seq_length=snake_case_ , is_pair=snake_case_ , framework=snake_case_ )
UpperCamelCase_: int = dummy_input["""input_ids"""].shape
UpperCamelCase_: List[Any] = (batch, encoder_sequence, self._config.encoder_hidden_size)
UpperCamelCase_: List[Any] = dummy_input.pop("""input_ids""" )
UpperCamelCase_: Any = dummy_input.pop("""attention_mask""" )
UpperCamelCase_: Tuple = torch.zeros(snake_case_ )
return common_inputs
class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    '''simple docstring'''
@property
def inputs(self) -> None:
pass
def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
    return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)
def lowerCAmelCase__ ( self : Any , snake_case_ : PretrainedConfig , snake_case_ : PretrainedConfig , snake_case_ : str = "default" ):
UpperCamelCase_: Union[str, Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(snake_case_ , snake_case_ )
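
# Illustrative usage sketch, not part of the original module: compose a
# VisionEncoderDecoderConfig from two sub-configurations. The checkpoint names
# below are arbitrary examples, not prescribed by this file.
def _example_compose_config():
    from transformers import AutoConfig, VisionEncoderDecoderConfig

    encoder_cfg = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
    decoder_cfg = AutoConfig.from_pretrained("gpt2")
    config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_cfg, decoder_cfg)
    # from_encoder_decoder_configs flips the decoder into cross-attention mode:
    assert config.decoder.is_decoder and config.decoder.add_cross_attention
    return config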
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """simple docstring"""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
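
# Quick sanity check, illustrative and not in the original file: the pair of
# functions above sorts in place and reports the number of comparisons made.
def _demo_quick_sort():
    data = [3, 1, 2, 5, 4]
    comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
    assert data == [1, 2, 3, 4, 5]
    assert comparisons > 0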
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution'''
    ''' is :'''
)
print(z)
from functools import lru_cache
@lru_cache
def factorial(num):
'''simple docstring'''
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
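
# Illustrative check, not in the original file: @lru_cache memoizes results,
# so after one call every smaller factorial is answered from the cache.
def _demo_factorial():
    assert factorial(0) == 1
    assert factorial(5) == 120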
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-4

    @property
    def default_onnx_opset(self):
        return 12
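
# Illustrative sketch, not in the original file: instantiate the config and
# inspect the ONNX axes declared above (OnnxConfig takes the model config).
def _demo_yolos_onnx_axes():
    cfg = YolosConfig()
    onnx_cfg = YolosOnnxConfig(cfg)
    assert "pixel_values" in onnx_cfg.inputs
    assert onnx_cfg.default_onnx_opset == 12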
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    '''simple docstring'''
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    '''simple docstring'''
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    '''simple docstring'''
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])

    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
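
# Numerical aside, illustrative only: for a Hermitian matrix the Rayleigh
# quotient of any non-zero vector lies between the extreme eigenvalues.
def _demo_rayleigh_bounds():
    a = np.array([[2, 0], [0, 5]])
    v = np.array([[1], [1]])
    q = rayleigh_quotient(a, v).real.item()
    eigs = np.linalg.eigvalsh(a)
    assert eigs.min() <= q <= eigs.max()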
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def max_subarray(arr: Sequence[float], low: int, high: int) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    mid = (low + high) // 2
    left_low, left_high, left_sum = max_subarray(arr, low, mid)
    right_low, right_high, right_sum = max_subarray(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = max_cross_sum(arr, low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def max_cross_sum(arr: Sequence[float], low: int, mid: int, high: int) -> tuple[int, int, float]:
    left_sum, max_left = float('-inf'), -1
    right_sum, max_right = float('-inf'), -1

    summ: float = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i

    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i

    return max_left, max_right, (left_sum + right_sum)


def time_max_subarray(input_size: int) -> float:
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start


def plot_runtimes() -> None:
    input_sizes = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print('No of Inputs\t\tTime Taken')
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, '\t\t', runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel('Number of Inputs')
    plt.ylabel('Time taken in seconds')
    plt.show()
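
# Worked example, illustrative and not in the original file: the classic CLRS
# array has maximum subarray [4, -1, 2, 1] with sum 6.
def _demo_max_subarray():
    arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    low, high, total = max_subarray(arr, 0, len(arr) - 1)
    assert total == 6
    assert arr[low : high + 1] == [4, -1, 2, 1]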
if __name__ == "__main__":
from doctest import testmod
testmod()
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip('/')
    target_model_path = args.target_model_path

    print(f'Load fine-pruned model from {model_name_or_path}')
    model = torch.load(os.path.join(model_name_or_path, 'pytorch_model.bin'))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}')
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}')
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f'Copied layer {name}')
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f'{prefix_}mask_scores']
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f'Pruned layer {name}')
            else:
                raise ValueError('Unknown pruning method')

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f'bertarized_{os.path.basename(model_name_or_path)}'
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f'\nCreated folder {target_model_path}')

    torch.save(pruned_model, os.path.join(target_model_path, 'pytorch_model.bin'))
    print('\nPruned model saved! See you later!')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
    args = parser.parse_args()
main(args)
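
# Example invocation for illustration only; the script filename and the model
# path are placeholders, while the flags mirror the argparse options above:
#
#   python bertarize.py \
#       --pruning_method sigmoied_threshold \
#       --threshold 0.66 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model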
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
def __init__( self : Dict , _snake_case : Tuple , _snake_case : Tuple=13 , _snake_case : List[str]=2 , _snake_case : int=24 , _snake_case : str=16 , _snake_case : Dict=True , _snake_case : str=True , _snake_case : str=32 , _snake_case : List[str]=5 , _snake_case : int=4 , _snake_case : Union[str, Any]=37 , _snake_case : Any="gelu" , _snake_case : Any=0.1 , _snake_case : Optional[int]=0.1 , _snake_case : Union[str, Any]=10 , _snake_case : Optional[Any]=0.02 , _snake_case : List[Any]=None , _snake_case : Optional[int]=2 , _snake_case : int=2 , )->Any:
'''simple docstring'''
__lowerCAmelCase : Tuple = parent
__lowerCAmelCase : Optional[int] = batch_size
__lowerCAmelCase : List[Any] = patch_size
__lowerCAmelCase : Tuple = max_length
__lowerCAmelCase : Optional[Any] = num_mel_bins
__lowerCAmelCase : Union[str, Any] = is_training
__lowerCAmelCase : str = use_labels
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : str = num_hidden_layers
__lowerCAmelCase : List[str] = num_attention_heads
__lowerCAmelCase : Optional[int] = intermediate_size
__lowerCAmelCase : Optional[int] = hidden_act
__lowerCAmelCase : List[Any] = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_probs_dropout_prob
__lowerCAmelCase : List[str] = type_sequence_label_size
__lowerCAmelCase : Optional[int] = initializer_range
__lowerCAmelCase : List[Any] = scope
__lowerCAmelCase : int = frequency_stride
__lowerCAmelCase : Optional[int] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__lowerCAmelCase : str = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
__lowerCAmelCase : Union[str, Any] = (self.max_length - self.patch_size) // self.time_stride + 1
__lowerCAmelCase : Any = frequency_out_dimension * time_out_dimension
__lowerCAmelCase : Optional[Any] = num_patches + 2
def UpperCAmelCase__ ( self : List[str] )->str:
'''simple docstring'''
__lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
__lowerCAmelCase : List[Any] = None
if self.use_labels:
__lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Tuple = self.get_config()
return config, input_values, labels
def UpperCAmelCase__ ( self : int )->List[str]:
'''simple docstring'''
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def UpperCAmelCase__ ( self : Optional[int] , _snake_case : Optional[int] , _snake_case : Any , _snake_case : List[Any] )->Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : Any = ASTModel(config=_snake_case )
model.to(_snake_case )
model.eval()
__lowerCAmelCase : List[Any] = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Dict )->Dict:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
(
__lowerCAmelCase
) : int = config_and_inputs
__lowerCAmelCase : Optional[Any] = {'input_values': input_values}
return config, inputs_dict
@require_torch
class snake_case_ ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
A_ = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
A_ = (
{'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel}
if is_torch_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
def UpperCAmelCase__ ( self : int , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Any , _snake_case : Dict , _snake_case : Union[str, Any] )->Optional[Any]:
'''simple docstring'''
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def UpperCAmelCase__ ( self : Tuple )->int:
'''simple docstring'''
__lowerCAmelCase : Any = ASTModelTester(self )
__lowerCAmelCase : Dict = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def UpperCAmelCase__ ( self : str )->str:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""AST does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : Optional[int] )->Dict:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Tuple )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Optional[Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def UpperCAmelCase__ ( self : List[str] )->str:
'''simple docstring'''
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : int = model_class(_snake_case )
__lowerCAmelCase : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : List[Any] = [*signature.parameters.keys()]
__lowerCAmelCase : int = ['input_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def UpperCAmelCase__ ( self : Optional[Any] )->int:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@slow
def UpperCAmelCase__ ( self : List[Any] )->Dict:
'''simple docstring'''
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : List[Any] = ASTModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
__lowerCAmelCase : str = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" )
__lowerCAmelCase : Any = torchaudio.load(_UpperCAmelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class snake_case_ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : List[str] )->Union[str, Any]:
'''simple docstring'''
return (
ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" )
if is_torchaudio_available()
else None
)
@slow
def UpperCAmelCase__ ( self : List[str] )->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.default_feature_extractor
__lowerCAmelCase : List[Any] = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(_snake_case )
__lowerCAmelCase : Dict = self.default_feature_extractor
__lowerCAmelCase : Optional[int] = prepare_audio()
__lowerCAmelCase : Any = audio.squeeze().numpy()
__lowerCAmelCase : int = feature_extractor(_snake_case , sampling_rate=_snake_case , return_tensors="""pt""" ).to(_snake_case )
# forward pass
with torch.no_grad():
__lowerCAmelCase : List[str] = model(**_snake_case )
# verify the logits
__lowerCAmelCase : int = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , _snake_case )
__lowerCAmelCase : int = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
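
# Back-of-the-envelope check, illustrative only, mirroring the arithmetic in
# ASTModelTester.__init__ with its defaults (num_mel_bins=16, max_length=24,
# patch_size=2, both strides=2): an 8 x 12 patch grid plus two special tokens.
def _expected_ast_seq_length(num_mel_bins=16, max_length=24, patch_size=2, frequency_stride=2, time_stride=2):
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    return frequency_out * time_out + 2  # +2 for the [CLS] and distillation tokens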
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}
_UpperCAmelCase = {
'vocab_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
},
'emoji_file': {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
},
}
_UpperCAmelCase = {
'abeja/gpt-neox-japanese-2.7b': 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
A_ = VOCAB_FILES_NAMES
A_ = PRETRAINED_VOCAB_FILES_MAP
A_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ = ['input_ids', 'attention_mask']
def __init__( self : str , _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Any="<|endoftext|>" , _snake_case : str="<|endoftext|>" , _snake_case : str="<|startoftext|>" , _snake_case : List[Any]="<|endoftext|>" , _snake_case : str=False , **_snake_case : List[Any] , )->Union[str, Any]:
'''simple docstring'''
super().__init__(
unk_token=_snake_case , pad_token=_snake_case , bos_token=_snake_case , eos_token=_snake_case , do_clean_text=_snake_case , **_snake_case , )
if not os.path.isfile(_snake_case ):
raise ValueError(
F'''Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'''
""" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
if not os.path.isfile(_snake_case ):
raise ValueError(
F'''Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'''
""" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
__lowerCAmelCase : Any = do_clean_text
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = load_vocab_and_emoji(_snake_case , _snake_case )
__lowerCAmelCase : int = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def UpperCAmelCase__ ( self : int )->str:
'''simple docstring'''
return len(self.raw_vocab )
def UpperCAmelCase__ ( self : Tuple )->Any:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def UpperCAmelCase__ ( self : Any , _snake_case : str )->Optional[int]:
'''simple docstring'''
return self.subword_tokenizer.tokenize(_snake_case , clean=self.do_clean_text )
def UpperCAmelCase__ ( self : Optional[Any] , _snake_case : Optional[Any] )->Any:
'''simple docstring'''
return self.vocab.get(_snake_case , self.vocab.get(self.unk_token ) )
def UpperCAmelCase__ ( self : int , _snake_case : Any )->int:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(_snake_case )
def UpperCAmelCase__ ( self : Optional[int] , _snake_case : int )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : str = """""".join(_snake_case ).strip()
return out_string
def UpperCAmelCase__ ( self : List[str] , _snake_case : "Conversation" )->List[int]:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_snake_case , add_special_tokens=_snake_case ) + [self.eos_token_id] )
if len(_snake_case ) > self.model_max_length:
__lowerCAmelCase : List[str] = input_ids[-self.model_max_length :]
return input_ids
def UpperCAmelCase__ ( self : Optional[Any] , _snake_case : str , _snake_case : Optional[str] = None )->Tuple[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = 0
if os.path.isdir(_snake_case ):
__lowerCAmelCase : Dict = os.path.join(
_snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__lowerCAmelCase : List[Any] = os.path.join(
_snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
else:
__lowerCAmelCase : Union[str, Any] = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
)
__lowerCAmelCase : Dict = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
)
with open(_snake_case , """w""" , encoding="""utf-8""" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
""" Please check that the vocabulary is not corrupted!""" )
__lowerCAmelCase : List[str] = token_index
writer.write(""",""".join(_snake_case ) + """\n""" )
index += 1
with open(_snake_case , """w""" , encoding="""utf-8""" ) as writer:
json.dump(self.emoji , _snake_case )
return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
def __init__( self : Optional[Any] , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : Optional[int] )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = vocab # same as swe
__lowerCAmelCase : str = ids_to_tokens # same as bpe
__lowerCAmelCase : Dict = emoji
__lowerCAmelCase : int = np.max([len(_snake_case ) for w in self.vocab.keys()] )
__lowerCAmelCase : str = re.compile(R"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
__lowerCAmelCase : Optional[Any] = re.compile(R"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
__lowerCAmelCase : Tuple = re.compile(R"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
__lowerCAmelCase : Optional[Any] = re.compile(
R"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
__lowerCAmelCase : Union[str, Any] = re.compile(
R"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
__lowerCAmelCase : str = re.compile(
R"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
__lowerCAmelCase : List[Any] = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
__lowerCAmelCase : Union[str, Any] = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
__lowerCAmelCase : str = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
def __len__( self : int )->int:
'''simple docstring'''
return len(self.ids_to_tokens )
def UpperCAmelCase__ ( self : List[str] , _snake_case : Any )->str:
'''simple docstring'''
__lowerCAmelCase : List[str] = self.content_repattera.sub("""<URL>""" , _snake_case )
__lowerCAmelCase : Tuple = self.content_repattera.sub("""<EMAIL>""" , _snake_case )
__lowerCAmelCase : Optional[Any] = self.content_repattera.sub("""<TEL>""" , _snake_case )
__lowerCAmelCase : str = self.content_repattera.sub("""<DATE>""" , _snake_case )
__lowerCAmelCase : Tuple = self.content_repattera.sub("""<DATE>""" , _snake_case )
__lowerCAmelCase : Tuple = self.content_repattera.sub("""<PRICE>""" , _snake_case )
__lowerCAmelCase : List[Any] = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
__lowerCAmelCase : str = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
return content
def UpperCAmelCase__ ( self : str , _snake_case : List[Any] , _snake_case : Optional[int]=False )->int:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = text.replace(""" """ , """<SP>""" )
__lowerCAmelCase : Optional[int] = text.replace(""" """ , """<SP>""" )
__lowerCAmelCase : Union[str, Any] = text.replace("""\r\n""" , """<BR>""" )
__lowerCAmelCase : Tuple = text.replace("""\n""" , """<BR>""" )
__lowerCAmelCase : List[str] = text.replace("""\r""" , """<BR>""" )
__lowerCAmelCase : Dict = text.replace("""\t""" , """<TAB>""" )
__lowerCAmelCase : Dict = text.replace("""—""" , """ー""" )
__lowerCAmelCase : Tuple = text.replace("""−""" , """ー""" )
for k, v in self.emoji["emoji"].items():
if k in text:
__lowerCAmelCase : Optional[Any] = text.replace(_snake_case , _snake_case )
if clean:
__lowerCAmelCase : List[Any] = self.clean_text(_snake_case )
def check_simbol(_snake_case : List[str] ):
__lowerCAmelCase : Optional[int] = x.encode()
if len(_snake_case ) == 1 and len(_snake_case ) == 2:
__lowerCAmelCase : Optional[Any] = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(_snake_case : Union[str, Any] ):
__lowerCAmelCase : Dict = x.encode()
if len(_snake_case ) == 1 and len(_snake_case ) == 3:
__lowerCAmelCase : List[str] = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe2_8080 and c <= 0xe2_b07f:
return True
return False
__lowerCAmelCase : Dict = 0
__lowerCAmelCase : Dict = []
while pos < len(_snake_case ):
__lowerCAmelCase : str = min(len(_snake_case ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
__lowerCAmelCase : Tuple = [] # (token_id, token, pos)
for e in range(_snake_case , _snake_case , -1 ):
__lowerCAmelCase : Optional[int] = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(_snake_case ) > 2:
__lowerCAmelCase : Tuple = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(_snake_case ) > 0:
# the smallest token_id is adopted
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = sorted(_snake_case , key=lambda _snake_case : x[0] )[0]
result.append(_snake_case )
__lowerCAmelCase : int = e
else:
__lowerCAmelCase : Dict = pos + 1
__lowerCAmelCase : Dict = text[pos:end]
if check_simbol(_snake_case ):
result.append("""<KIGOU>""" )
elif checkuae(_snake_case ):
result.append("""<U2000U2BFF>""" )
else:
for i in wd.encode("""utf-8""" ):
result.append("""<|byte%d|>""" % i )
__lowerCAmelCase : int = end
return result
def UpperCAmelCase__ ( self : List[str] , _snake_case : Optional[int] , _snake_case : List[Any]="\n" )->List[Any]:
'''simple docstring'''
__lowerCAmelCase : List[str] = []
__lowerCAmelCase : Union[str, Any] = []
__lowerCAmelCase : Optional[Any] = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode("""utf-8""" , errors="""replace""" ) )
__lowerCAmelCase : Optional[Any] = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["""emoji_inv"""][word] )
elif word == "<SP>":
words.append(""" """ )
elif word == "<BR>":
words.append(_snake_case )
elif word == "<TAB>":
words.append("""\t""" )
elif word == "<BLOCK>":
words.append("""▀""" )
elif word == "<KIGOU>":
words.append("""ǀ""" )
elif word == "<U2000U2BFF>":
words.append("""‖""" )
else:
words.append(_snake_case )
if len(_snake_case ) > 0:
words.append(bytearray(_snake_case ).decode("""utf-8""" , errors="""replace""" ) )
__lowerCAmelCase : Dict = """""".join(_snake_case )
return text
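
# Illustrative sketch, not in the original file, of the byte-fallback scheme
# used above: characters missing from the vocabulary are emitted as <|byteN|>
# tokens and reassembled with bytearray on decode.
def _demo_byte_fallback(text="日"):
    tokens = ["<|byte%d|>" % b for b in text.encode("utf-8")]
    decoded = bytearray(int(t[6:-2]) for t in tokens).decode("utf-8", errors="replace")
    assert decoded == text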
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Optional[Any] = logging.get_logger(__name__)
a_ : Any = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class _snake_case ( A__ ):
_lowercase : Dict = '''wav2vec2'''
def __init__( self , a=32 , a=768 , a=12 , a=12 , a=3072 , a="gelu" , a=0.1 , a=0.1 , a=0.1 , a=0.0 , a=0.0 , a=0.1 , a=0.1 , a=0.02 , a=1E-5 , a="group" , a="gelu" , a=(512, 512, 512, 512, 512, 512, 512) , a=(5, 2, 2, 2, 2, 2, 2) , a=(10, 3, 3, 3, 3, 2, 2) , a=False , a=128 , a=16 , a=False , a=True , a=0.05 , a=10 , a=2 , a=0.0 , a=10 , a=0 , a=320 , a=2 , a=0.1 , a=100 , a=256 , a=256 , a=0.1 , a="sum" , a=False , a=False , a=256 , a=(512, 512, 512, 512, 1500) , a=(5, 3, 3, 1, 1) , a=(1, 2, 3, 1, 1) , a=512 , a=0 , a=1 , a=2 , a=False , a=3 , a=2 , a=3 , a=None , a=None , **a , ) -> Optional[Any]:
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a)
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = feat_extract_norm
SCREAMING_SNAKE_CASE = feat_extract_activation
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = conv_bias
SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE = len(self.conv_dim)
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_dropout
SCREAMING_SNAKE_CASE = attention_dropout
SCREAMING_SNAKE_CASE = activation_dropout
SCREAMING_SNAKE_CASE = feat_proj_dropout
SCREAMING_SNAKE_CASE = final_dropout
SCREAMING_SNAKE_CASE = layerdrop
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = do_stable_layer_norm
SCREAMING_SNAKE_CASE = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE = apply_spec_augment
SCREAMING_SNAKE_CASE = mask_time_prob
SCREAMING_SNAKE_CASE = mask_time_length
SCREAMING_SNAKE_CASE = mask_time_min_masks
SCREAMING_SNAKE_CASE = mask_feature_prob
SCREAMING_SNAKE_CASE = mask_feature_length
SCREAMING_SNAKE_CASE = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE = num_codevectors_per_group
SCREAMING_SNAKE_CASE = num_codevector_groups
SCREAMING_SNAKE_CASE = contrastive_logits_temperature
SCREAMING_SNAKE_CASE = feat_quantizer_dropout
SCREAMING_SNAKE_CASE = num_negatives
SCREAMING_SNAKE_CASE = codevector_dim
SCREAMING_SNAKE_CASE = proj_codevector_dim
SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE = ctc_loss_reduction
SCREAMING_SNAKE_CASE = ctc_zero_infinity
# adapter
SCREAMING_SNAKE_CASE = add_adapter
SCREAMING_SNAKE_CASE = adapter_kernel_size
SCREAMING_SNAKE_CASE = adapter_stride
SCREAMING_SNAKE_CASE = num_adapter_layers
SCREAMING_SNAKE_CASE = output_hidden_size or hidden_size
SCREAMING_SNAKE_CASE = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = list(a)
SCREAMING_SNAKE_CASE = xvector_output_dim
@property
    def inputs_to_logits_ratio(self):
return functools.reduce(operator.mul , self.conv_stride , 1)
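
# Worked example, illustrative only: inputs_to_logits_ratio multiplies the conv
# strides, so the default conv_stride=(5, 2, 2, 2, 2, 2, 2) means one logit
# frame covers 5 * 2**6 = 320 input samples.
def _demo_inputs_to_logits_ratio():
    assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320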
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
a_ : List[Any] = logging.get_logger(__name__)
a_ : str = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class _snake_case ( A__ ):
_lowercase : Any = '''deberta-v2'''
def __init__( self , a=12_8100 , a=1536 , a=24 , a=24 , a=6144 , a="gelu" , a=0.1 , a=0.1 , a=512 , a=0 , a=0.02 , a=1E-7 , a=False , a=-1 , a=0 , a=True , a=None , a=0 , a="gelu" , **a , ) -> List[Any]:
super().__init__(**a)
SCREAMING_SNAKE_CASE = hidden_size
SCREAMING_SNAKE_CASE = num_hidden_layers
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = intermediate_size
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = max_position_embeddings
SCREAMING_SNAKE_CASE = type_vocab_size
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = relative_attention
SCREAMING_SNAKE_CASE = max_relative_positions
SCREAMING_SNAKE_CASE = pad_token_id
SCREAMING_SNAKE_CASE = position_biased_input
# Backwards compatibility
if type(a) == str:
SCREAMING_SNAKE_CASE = [x.strip() for x in pos_att_type.lower().split('|')]
SCREAMING_SNAKE_CASE = pos_att_type
SCREAMING_SNAKE_CASE = vocab_size
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = kwargs.get('pooler_hidden_size' , a)
SCREAMING_SNAKE_CASE = pooler_dropout
SCREAMING_SNAKE_CASE = pooler_hidden_act
class _snake_case ( A__ ):
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])
@property
def SCREAMING_SNAKE_CASE__ ( self) -> int:
return 12
def SCREAMING_SNAKE_CASE__ ( self , a , a = -1 , a = -1 , a = -1 , a = False , a = None , a = 3 , a = 40 , a = 40 , a = None , ) -> Mapping[str, Any]:
SCREAMING_SNAKE_CASE = super().generate_dummy_inputs(preprocessor=a , framework=a)
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
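
# Illustrative sketch, not part of the original file: with type_vocab_size == 0
# the input spec above drops token_type_ids. Class names are the public ones
# from the transformers library; the module path is an assumption.
def _demo_deberta_onnx_inputs():
    from transformers import DebertaV2Config
    from transformers.models.deberta_v2.configuration_deberta_v2 import DebertaV2OnnxConfig

    onnx_config = DebertaV2OnnxConfig(DebertaV2Config(type_vocab_size=0))
    assert "token_type_ids" not in onnx_config.inputs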
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class A_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self: Dict ):
__lowerCamelCase : List[Any] = 0
def _snake_case ( self: str ):
__lowerCamelCase : Tuple = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32' )
self.assertIsInstance(a , a )
def _snake_case ( self: Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase : str = Path(a ) / 'preprocessor_config.json'
__lowerCamelCase : Optional[int] = Path(a ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(a , 'w' ) , )
json.dump({'model_type': 'clip'} , open(a , 'w' ) )
__lowerCamelCase : str = AutoImageProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def _snake_case ( self: Tuple ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase : Tuple = Path(a ) / 'preprocessor_config.json'
__lowerCamelCase : Any = Path(a ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(a , 'w' ) , )
json.dump({'model_type': 'clip'} , open(a , 'w' ) )
__lowerCamelCase : Any = AutoImageProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def _snake_case ( self: Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase : Dict = CLIPConfig()
            # Create a dummy config file with image_processor_type
__lowerCamelCase : Optional[Any] = Path(a ) / 'preprocessor_config.json'
__lowerCamelCase : Union[str, Any] = Path(a ) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(a , 'w' ) , )
json.dump({'model_type': 'clip'} , open(a , 'w' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__lowerCamelCase : Tuple = AutoImageProcessor.from_pretrained(a ).to_dict()
config_dict.pop('image_processor_type' )
__lowerCamelCase : int = CLIPImageProcessor(**a )
# save in new folder
model_config.save_pretrained(a )
config.save_pretrained(a )
__lowerCamelCase : List[str] = AutoImageProcessor.from_pretrained(a )
# make sure private variable is not incorrectly saved
__lowerCamelCase : Tuple = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(a , a )
def _snake_case ( self: List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase : List[Any] = Path(a ) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(a , 'w' ) , )
__lowerCamelCase : str = AutoImageProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def _snake_case ( self: str ):
with self.assertRaisesRegex(
a , 'clip-base is not a local folder and is not a valid model identifier' ):
__lowerCamelCase : Dict = AutoImageProcessor.from_pretrained('clip-base' )
def _snake_case ( self: int ):
with self.assertRaisesRegex(
a , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__lowerCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained(a , revision='aaaaaa' )
def _snake_case ( self: Any ):
with self.assertRaisesRegex(
a , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
__lowerCamelCase : Tuple = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model' )
def _snake_case ( self: str ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a ):
__lowerCamelCase : Union[str, Any] = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
__lowerCamelCase : Dict = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=a )
__lowerCamelCase : Tuple = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=a )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a )
__lowerCamelCase : Any = AutoImageProcessor.from_pretrained(a , trust_remote_code=a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor' )
def _snake_case ( self: Any ):
try:
AutoConfig.register('custom' , a )
AutoImageProcessor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoImageProcessor.register(a , a )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase : Tuple = Path(a ) / 'preprocessor_config.json'
__lowerCamelCase : Any = Path(a ) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(a , 'w' ) , )
json.dump({'model_type': 'clip'} , open(a , 'w' ) )
__lowerCamelCase : int = CustomImageProcessor.from_pretrained(a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a )
__lowerCamelCase : List[str] = AutoImageProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self: List[Any] ):
        class NewImageProcessor(CLIPImageProcessor):
            '''simple docstring'''

            is_local = True
try:
AutoConfig.register('custom' , a )
AutoImageProcessor.register(a , a )
# If remote code is not set, the default is to use local
__lowerCamelCase : List[str] = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor' )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__lowerCamelCase : str = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=a )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__lowerCamelCase : List[Any] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=a )
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor' )
self.assertTrue(not hasattr(a , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
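
# Minimal sketch of the registration pattern exercised above; CustomConfig and
# CustomImageProcessor come from the test helper modules imported at the top.
def _register_custom_image_processor():
    AutoConfig.register("custom", CustomConfig)
    AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
    # AutoImageProcessor.from_pretrained can now resolve checkpoints whose
    # config is a CustomConfig to CustomImageProcessor.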
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    inputs_are_torch = False  # ensures pure NumPy inputs are handled below
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
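
# Illustrative check, not in the original file: interpolating halfway between
# two orthonormal vectors lands on the 45-degree unit vector.
def _demo_slerp():
    e0, e1 = np.array([1.0, 0.0]), np.array([0.0, 1.0])
    mid = slerp(0.5, e0, e1)
    assert np.allclose(mid, np.array([1.0, 1.0]) / np.sqrt(2.0))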
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class A_ ( __UpperCamelCase ):
'''simple docstring'''
def __init__( self: Any , a: AutoencoderKL , a: CLIPTextModel , a: CLIPModel , a: CLIPTokenizer , a: UNetaDConditionModel , a: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , a: CLIPFeatureExtractor , a: Union[str, Any]=None , a: Union[str, Any]=None , a: Union[str, Any]=None , ):
super().__init__()
self.register_modules(
vae=a , text_encoder=a , clip_model=a , tokenizer=a , unet=a , scheduler=a , feature_extractor=a , coca_model=a , coca_tokenizer=a , coca_transform=a , )
__lowerCamelCase : Tuple = (
feature_extractor.size
if isinstance(feature_extractor.size , a )
else feature_extractor.size['shortest_edge']
)
__lowerCamelCase : List[Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , a )
set_requires_grad(self.clip_model , a )
def _snake_case ( self: Optional[Any] , a: Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowerCamelCase : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(a )
def _snake_case ( self: Dict ):
self.enable_attention_slicing(a )
def _snake_case ( self: Optional[Any] ):
set_requires_grad(self.vae , a )
def _snake_case ( self: List[Any] ):
set_requires_grad(self.vae , a )
def _snake_case ( self: int ):
set_requires_grad(self.unet , a )
def _snake_case ( self: int ):
set_requires_grad(self.unet , a )
def _snake_case ( self: Optional[Any] , a: Union[str, Any] , a: List[str] , a: List[Any] ):
# get the original timestep using init_timestep
__lowerCamelCase : List[Any] = min(int(num_inference_steps * strength ) , a )
__lowerCamelCase : str = max(num_inference_steps - init_timestep , 0 )
__lowerCamelCase : List[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _snake_case ( self: Union[str, Any] , a: Optional[Any] , a: Any , a: Optional[int] , a: Optional[Any] , a: Union[str, Any] , a: List[str]=None ):
if not isinstance(a , torch.Tensor ):
raise ValueError(F'`image` has to be of type `torch.Tensor` but is {type(a )}' )
__lowerCamelCase : Union[str, Any] = image.to(device=a , dtype=a )
if isinstance(a , a ):
__lowerCamelCase : str = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(a )
]
__lowerCamelCase : Tuple = torch.cat(a , dim=0 )
else:
__lowerCamelCase : List[Any] = self.vae.encode(a ).latent_dist.sample(a )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowerCamelCase : List[str] = 0.1_8_2_1_5 * init_latents
__lowerCamelCase : Union[str, Any] = init_latents.repeat_interleave(a , dim=0 )
__lowerCamelCase : Optional[int] = randn_tensor(init_latents.shape , generator=a , device=a , dtype=a )
# get latents
__lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(a , a , a )
__lowerCamelCase : int = init_latents
return latents
def _snake_case ( self: Optional[int] , a: Any ):
__lowerCamelCase : List[Any] = self.coca_transform(a ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
__lowerCamelCase : Any = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
__lowerCamelCase : str = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def _snake_case ( self: Any , a: Tuple , a: Tuple ):
__lowerCamelCase : Dict = self.feature_extractor.preprocess(a )
__lowerCamelCase : Dict = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
__lowerCamelCase : List[str] = self.clip_model.get_image_features(a )
__lowerCamelCase : Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
__lowerCamelCase : Tuple = image_embeddings_clip.repeat_interleave(a , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def _snake_case ( self: str , a: str , a: int , a: List[Any] , a: str , a: List[Any] , a: Dict , a: int , ):
__lowerCamelCase : Optional[Any] = latents.detach().requires_grad_()
__lowerCamelCase : str = self.scheduler.scale_model_input(a , a )
# predict the noise residual
__lowerCamelCase : Optional[int] = self.unet(a , a , encoder_hidden_states=a ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
__lowerCamelCase : str = self.scheduler.alphas_cumprod[timestep]
__lowerCamelCase : Dict = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
__lowerCamelCase : Optional[int] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
__lowerCamelCase : Optional[int] = torch.sqrt(a )
__lowerCamelCase : int = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , a ):
__lowerCamelCase : str = self.scheduler.sigmas[index]
__lowerCamelCase : List[Any] = latents - sigma * noise_pred
else:
raise ValueError(F'scheduler type {type(self.scheduler )} not supported' )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
__lowerCamelCase : Optional[int] = 1 / 0.1_8_2_1_5 * sample
__lowerCamelCase : Optional[Any] = self.vae.decode(a ).sample
__lowerCamelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase : Any = transforms.Resize(self.feature_extractor_size )(a )
__lowerCamelCase : Union[str, Any] = self.normalize(a ).to(latents.dtype )
__lowerCamelCase : Tuple = self.clip_model.get_image_features(a )
__lowerCamelCase : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=a )
__lowerCamelCase : List[str] = spherical_dist_loss(a , a ).mean() * clip_guidance_scale
__lowerCamelCase : Tuple = -torch.autograd.grad(a , a )[0]
if isinstance(self.scheduler , a ):
__lowerCamelCase : Optional[int] = latents.detach() + grads * (sigma**2)
__lowerCamelCase : List[Any] = noise_pred_original
else:
__lowerCamelCase : str = noise_pred_original - torch.sqrt(a ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__(self, content_image: Union[torch.FloatTensor, PIL.Image.Image], style_image: Union[torch.FloatTensor, PIL.Image.Image], content_prompt: Optional[str] = None, style_prompt: Optional[str] = None, height: Optional[int] = 512, width: Optional[int] = 512, noise_strength: float = 0.6, num_inference_steps: Optional[int] = 50, guidance_scale: Optional[float] = 7.5, batch_size: Optional[int] = 1, eta: float = 0.0, clip_guidance_scale: Optional[float] = 100, generator: Optional[torch.Generator] = None, output_type: Optional[str] = 'pil', return_dict: bool = True, slerp_latent_style_strength: float = 0.8, slerp_prompt_style_strength: float = 0.1, slerp_clip_image_style_strength: float = 0.1):
    if isinstance(generator, list) and len(generator) != batch_size:
        raise ValueError(f'You have passed {batch_size} batch_size, but only {len(generator)} generators.')
    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.')
    if isinstance(generator, torch.Generator) and batch_size > 1:
        generator = [generator] + [None] * (batch_size - 1)
    coca_is_none = [
        ('model', self.coca_model is None),
        ('tokenizer', self.coca_tokenizer is None),
        ('transform', self.coca_transform is None),
    ]
    coca_is_none = [x[0] for x in coca_is_none if x[1]]
    coca_is_none_str = ', '.join(coca_is_none)
    # generate prompts with coca model if prompt is None
    if content_prompt is None:
        if len(coca_is_none):
            raise ValueError(
                f'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
                f' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.')
        content_prompt = self.get_image_description(content_image)
    if style_prompt is None:
        if len(coca_is_none):
            raise ValueError(
                f'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
                f' Set prompt or pass CoCa [{coca_is_none_str}] to DiffusionPipeline.')
        style_prompt = self.get_image_description(style_image)
    # get prompt text embeddings for content and style
    content_text_input = self.tokenizer(
        content_prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt')
    content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
    style_text_input = self.tokenizer(
        style_prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt')
    style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
    text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)
    # duplicate text embeddings for each generation per prompt
    text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
    # set timesteps
    accepts_offset = 'offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
    extra_set_kwargs = {}
    if accepts_offset:
        extra_set_kwargs['offset'] = 1
    self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
    # Some schedulers like PNDM have timesteps as arrays
    # It's more optimized to move all timesteps to the correct device beforehand
    # (note: Tensor.to is not in-place, so the result has to be assigned back)
    self.scheduler.timesteps = self.scheduler.timesteps.to(self.device)
    timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
    latent_timestep = timesteps[:1].repeat(batch_size)
    # Preprocess image
    preprocessed_content_image = preprocess(content_image, width, height)
    content_latents = self.prepare_latents(
        preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator)
    preprocessed_style_image = preprocess(style_image, width, height)
    style_latents = self.prepare_latents(
        preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator)
    latents = slerp(slerp_latent_style_strength, content_latents, style_latents)
    if clip_guidance_scale > 0:
        content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
        style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
        clip_image_embeddings = slerp(
            slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding)
    # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    do_classifier_free_guidance = guidance_scale > 1.0
    # get unconditional embeddings for classifier free guidance
    if do_classifier_free_guidance:
        max_length = content_text_input.input_ids.shape[-1]
        uncond_input = self.tokenizer([''], padding='max_length', max_length=max_length, return_tensors='pt')
        uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
        # duplicate unconditional embeddings for each generation per prompt
        uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
    # get the initial random noise unless the user supplied it
    # Unlike in other pipelines, latents need to be generated in the target device
    # for 1-to-1 results reproducibility with the CompVis implementation.
    # However this currently doesn't work in `mps`.
    latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
    latents_dtype = text_embeddings.dtype
    if latents is None:
        if self.device.type == 'mps':
            # randn does not work reproducibly on mps
            latents = torch.randn(latents_shape, generator=generator, device='cpu', dtype=latents_dtype).to(
                self.device)
        else:
            latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
    else:
        if latents.shape != latents_shape:
            raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
        latents = latents.to(self.device)
    # scale the initial noise by the standard deviation required by the scheduler
    latents = latents * self.scheduler.init_noise_sigma
    # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
    # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
    # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
    # and should be between [0, 1]
    accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
    extra_step_kwargs = {}
    if accepts_eta:
        extra_step_kwargs['eta'] = eta
    # check if the scheduler accepts generator
    accepts_generator = 'generator' in set(inspect.signature(self.scheduler.step).parameters.keys())
    if accepts_generator:
        extra_step_kwargs['generator'] = generator
    with self.progress_bar(total=num_inference_steps):
        for i, t in enumerate(timesteps):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
            # perform classifier free guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # perform clip guidance
            if clip_guidance_scale > 0:
                text_embeddings_for_guidance = (
                    text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                )
                noise_pred, latents = self.cond_fn(
                    latents, t, i, text_embeddings_for_guidance, noise_pred, clip_image_embeddings, clip_guidance_scale)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
    # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
    latents = 1 / 0.18215 * latents
    image = self.vae.decode(latents).sample
    image = (image / 2 + 0.5).clamp(0, 1)
    image = image.cpu().permute(0, 2, 3, 1).numpy()
    if output_type == 'pil':
        image = self.numpy_to_pil(image)
    if not return_dict:
        return (image, None)
    return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
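# A note on `slerp`: the pipeline above blends content and style text embeddings,
# latents, and CLIP image embeddings with spherical linear interpolation. The actual
# helper is defined elsewhere in the file; the function below is a hypothetical,
# minimal tensor slerp with the usual near-parallel fallback, shown for illustration
# only, not the exact source implementation.
def slerp_sketch(t: float, v0: torch.Tensor, v1: torch.Tensor, dot_threshold: float = 0.9995) -> torch.Tensor:
    # cosine of the angle between the (flattened) inputs
    dot = torch.sum(v0 * v1) / (v0.norm() * v1.norm())
    if dot.abs() > dot_threshold:
        # nearly parallel vectors: plain linear interpolation is numerically safer
        return (1 - t) * v0 + t * v1
    theta = torch.acos(dot)
    sin_theta = torch.sin(theta)
    return (torch.sin((1 - t) * theta) * v0 + torch.sin(t * theta) * v1) / sin_theta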
| 194
| 0
|
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")


def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")


def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith("\""):
            title = title[1:]
        if title.endswith("\""):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True)

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file:"
            " qa - a single line in the following format: question [tab] answer_list;"
            " ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
args = get_args()
main(args)
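# Hypothetical mini-run of the scoring helpers above, with a simplified stand-in
# for utils_rag.exact_match_score (the real one also normalizes articles and
# punctuation); no model or retriever is needed for this illustration:
def simple_exact_match(prediction, ground_truth):
    return float(prediction.strip().lower() == ground_truth.strip().lower())

demo_hypos = ["paris", "42"]
demo_answer_lists = [["Paris", "paris, france"], ["forty-two"]]
demo_em = sum(
    metric_max_over_ground_truths(simple_exact_match, pred, gts)
    for pred, gts in zip(demo_hypos, demo_answer_lists)
)
print(f"EM: {100.0 * demo_em / len(demo_hypos):.2f}")  # EM: 50.00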
| 40
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '''▁'''
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/m2m100_418M''': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs: Optional[Dict[str, Any]] = None, num_madeup_words=8, **kwargs, ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs, )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self, src_texts: List[str], src_lang: str = "en", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
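# Typical translation flow with this tokenizer, mirroring the documented M2M100
# usage (`get_lang_id` supplies the forced BOS token for the target language);
# shown as a usage sketch, since it downloads the full checkpoint:
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

inputs = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("fr"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))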
| 223
| 0
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split('/')
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith('layer_with_weights'):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append('/'.join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
            ' heads.')

    # convert layers
    logger.info('Converting weights...')
    for full_name, array in zip(names, arrays):
        name = full_name.split('/')
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith('layer_with_weights'):
                layer_num = int(m_name.split('-')[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(['embeddings', 'LayerNorm'])
                    pointer = getattr(pointer, 'embeddings')
                    pointer = getattr(pointer, 'LayerNorm')
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(['encoder', 'layer', str(layer_num - 4)])
                    pointer = getattr(pointer, 'encoder')
                    pointer = getattr(pointer, 'layer')
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(['pooler', 'dense'])
                    pointer = getattr(pointer, 'pooler')
                    pointer = getattr(pointer, 'dense')
            elif m_name == "embeddings":
                trace.append('embeddings')
                pointer = getattr(pointer, 'embeddings')
                if layer_num == 0:
                    trace.append('word_embeddings')
                    pointer = getattr(pointer, 'word_embeddings')
                elif layer_num == 1:
                    trace.append('position_embeddings')
                    pointer = getattr(pointer, 'position_embeddings')
                elif layer_num == 2:
                    trace.append('token_type_embeddings')
                    pointer = getattr(pointer, 'token_type_embeddings')
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append('weight')
                pointer = getattr(pointer, 'weight')
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(['attention', 'self'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'self')
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(['attention', 'output', 'LayerNorm'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'LayerNorm')
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(['attention', 'output', 'dense'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'dense')
            elif m_name == "_output_dense":
                # output dense
                trace.extend(['output', 'dense'])
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'dense')
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(['output', 'LayerNorm'])
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'LayerNorm')
            elif m_name == "_key_dense":
                # attention key
                trace.append('key')
                pointer = getattr(pointer, 'key')
            elif m_name == "_query_dense":
                # attention query
                trace.append('query')
                pointer = getattr(pointer, 'query')
            elif m_name == "_value_dense":
                # attention value
                trace.append('value')
                pointer = getattr(pointer, 'value')
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(['intermediate', 'dense'])
                pointer = getattr(pointer, 'intermediate')
                pointer = getattr(pointer, 'dense')
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append('bias')
                pointer = getattr(pointer, 'bias')
            elif m_name in ["kernel", "gamma"]:
                trace.append('weight')
                pointer = getattr(pointer, 'weight')
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = '.'.join(trace)
        if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)', trace) or re.match(
            R'(\S+)\.attention\.output\.dense\.weight', trace):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}")
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model


def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
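# Minimal illustration of the getattr-based pointer walk used in the converter
# above: starting from the root module, each trace element narrows down to a
# single parameter tensor (names here are generic, not tied to BERT):
def follow_trace(module, trace):
    pointer = module
    for attr in trace:
        # numeric elements index into a ModuleList, others are attribute lookups
        pointer = pointer[int(attr)] if attr.isdigit() else getattr(pointer, attr)
    return pointer

# e.g. follow_trace(model, ["embeddings", "LayerNorm", "weight"]) returns the
# LayerNorm weight tensor that a TF variable gets copied into.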
| 120
|
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive variant of the Euclidean algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
if __name__ == "__main__":
main()
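# Quick property check of the two implementations against math.gcd:
import math
import random

for _ in range(1_000):
    x, y = random.randint(0, 10**6), random.randint(0, 10**6)
    assert euclidean_gcd(x, y) == euclidean_gcd_recursive(x, y) == math.gcd(x, y)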
| 120
| 1
|
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )

    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}

        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))


@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('The model doesn\'t support left padding')  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass


@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained('ctrl')
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device)  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
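# For reference, a minimal equivalent of the `ids_tensor` test helper used above
# (the real helper lives in tests/test_modeling_common.py; this sketch assumes
# uniform sampling over the vocabulary, which is all these tests rely on):
def ids_tensor_sketch(shape, vocab_size):
    return torch.randint(low=0, high=vocab_size, size=tuple(shape), dtype=torch.long)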
| 27
|
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in parallel: 1/R_eq = 1/R_1 + 1/R_2 + ..."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Equivalent resistance of resistors in series: R_eq = R_1 + R_2 + ..."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
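# Worked example: in parallel, 1/R_eq = 1/R_1 + 1/R_2, so 3 Ω and 6 Ω combine to
# 1 / (1/3 + 1/6) = 2 Ω, while in series they simply add up to 9 Ω:
print(resistor_parallel([3.0, 6.0]))  # 2.0
print(resistor_series([3.0, 6.0]))    # 9.0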
| 64
| 0
|
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = "\\n Text data.\n Second line of data."
FILE_PATH = "file"


@pytest.fixture(scope='''session''')
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp('''data''') / (FILE_PATH + '''.zstd''')
    data = bytes(FILE_CONTENT, '''utf-8''')
    with zstd.open(path, '''wb''') as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), '''w''') as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize('''compression_format''', ['''gzip''', '''xz''', '''zstd'''])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / '''cache'''
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize('''default_extracted''', [True, False])
@pytest.mark.parametrize('''default_cache_dir''', [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = '''custom_cache'''
    custom_extracted_dir = '''custom_extracted_dir'''
    custom_extracted_path = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        expected = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''', custom_extracted_dir)
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / '''__missing_file__.txt''')
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = '''./__missing_file__.txt'''
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(F'''tmp://{tmpfs_file}''')
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path('''https://huggingface.co''')


@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled):
        http_get('''https://huggingface.co''', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head('''https://huggingface.co''')


@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get('''ftp://huggingface.co''', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head('''ftp://huggingface.co''')


@patch('''datasets.config.HF_DATASETS_OFFLINE''', True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp('''data''') / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get('''s3://huggingface.co''', temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head('''s3://huggingface.co''')
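# Outside the test-suite, `cached_path` is used the same way: download (or reuse
# the cached copy of) a remote file and optionally extract it. The URL below is a
# hypothetical placeholder, not a real dataset file:
def fetch_example():
    download_config = DownloadConfig(extract_compressed_file=True)
    return cached_path("https://example.com/data.txt.gz", download_config=download_config)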
| 98
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
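# Minimal illustration of the lazy-import pattern used above: attribute access on
# the module triggers the real submodule import, keeping the top-level package
# import cheap. This is a sketch of the idea, not the real `_LazyModule` class:
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported name to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)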
| 98
| 1
|
import copy
import re
class TrialShortNamer:
    """Maps trial hyperparameter dicts to short, reversible run names."""

    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ''
                while integer != 0:
                    s = chr(ord('A') + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + '#' + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    i += 1  # advance the counter, otherwise this loop would never terminate
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split('_')

        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ['', '_']

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            'short_word': {},
            'reverse_short_word': {},
            'short_param': {},
            'reverse_short_param': {},
        }

        field_keys = list(cls.DEFAULTS.keys())

        for k in field_keys:
            cls.add_new_param_name(info, k)

        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]

        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"""You should provide a default value for the param name {k} with value {v}""")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue

            key = cls.NAMING_INFO['short_param'][k]

            if isinstance(v, bool):
                v = 1 if v else 0

            sep = '' if isinstance(v, (int, float)) else '-'
            e = f"""{key}{sep}{v}"""
            name.append(e)

        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split('_')

        parameters = {}

        for value in values:
            if "-" in value:
                p_k, p_v = value.split('-')
            else:
                p_k = re.sub('[0-9.]', '', value)
                p_v = float(re.sub('[^0-9.]', '', value))

            key = cls.NAMING_INFO['reverse_short_param'][p_k]

            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
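# The core idea behind shortname_for_word, standalone: give each word the shortest
# prefix not already taken, so e.g. "learning_rate" can shrink to "lr".
def shortest_unused_prefix(word: str, taken: set) -> str:
    for n in range(1, len(word) + 1):
        if word[:n] not in taken:
            return word[:n]
    return word

taken = set()
for w in ["learning", "layers", "label"]:
    p = shortest_unused_prefix(w, taken)
    taken.add(p)
    print(w, "->", p)  # learning -> l, layers -> la, label -> lab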
| 39
|
class Graph:
    """Directed graph (adjacency-list representation) with recursive DFS."""

    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
lowercase : Dict = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('DFS:')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
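# An equivalent iterative DFS with an explicit stack, for comparison with the
# recursive version above:
def dfs_iterative(graph: dict, start: int) -> list:
    visited, order, stack = set(), [], [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        # push neighbours in reverse so they pop in insertion order
        stack.extend(reversed(graph.get(node, [])))
    return order

example = {0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}
print(dfs_iterative(example, 0))  # [0, 1, 2, 3]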
| 232
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
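
# A minimal sketch (our own illustration, not transformers code) of the lazy-import
# pattern used above: a module proxy that defers each submodule import until the
# first attribute access, which keeps the top-level package import cheap.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__} has no attribute {attr}")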
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int) -> None:
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers: int, devices: list) -> dict:
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
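
# Quick usage sketch (our own example values): split 12 layers evenly over 3 devices.
if __name__ == "__main__":
    device_map = get_device_map(n_layers=12, devices=[0, 1, 2])
    print(device_map)  # {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}
    assert_device_map(device_map, num_blocks=12)  # raises ValueError on a bad map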
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
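
# Brief usage sketch (our own example values): thanks to `attribute_map`, generic
# code can read `hidden_size` even though this config stores the value as `embed_dim`.
if __name__ == "__main__":
    config = Swin2SRConfig(embed_dim=96)
    print(config.embed_dim)    # 96
    print(config.hidden_size)  # also 96, resolved through attribute_map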
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def lowerCamelCase__ ( __snake_case ) -> Tuple:
"""simple docstring"""
return 1 / (1 + np.exp(-z ))
def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
return (-y * np.log(__snake_case ) - (1 - y) * np.log(1 - h )).mean()
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = np.dot(__snake_case, __snake_case )
return np.sum(y * scores - np.log(1 + np.exp(__snake_case ) ) )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=7_00_00 ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = np.zeros(x.shape[1] )
for iterations in range(__snake_case ):
_UpperCamelCase = np.dot(__snake_case, __snake_case )
_UpperCamelCase = sigmoid_function(__snake_case )
_UpperCamelCase = np.dot(x.T, h - y ) / y.size
_UpperCamelCase = theta - alpha * gradient # updating the weights
_UpperCamelCase = np.dot(__snake_case, __snake_case )
_UpperCamelCase = sigmoid_function(__snake_case )
_UpperCamelCase = cost_function(__snake_case, __snake_case )
if iterations % 1_00 == 0:
print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
_a = datasets.load_iris()
_a = iris.data[:, :2]
_a = (iris.target != 0) * 1
_a = 0.1
_a = logistic_reg(alpha, x, y, max_iterations=7_0000)
print("""theta: """, theta) # printing the theta i.e our weights vector
def lowerCamelCase__ ( __snake_case ) -> Tuple:
"""simple docstring"""
return sigmoid_function(
np.dot(__snake_case, __snake_case ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
((_a) , (_a)) = (x[:, 0].min(), x[:, 0].max())
((_a) , (_a)) = (x[:, 1].min(), x[:, 1].max())
((_a) , (_a)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
_a = np.c_[xxa.ravel(), xxa.ravel()]
_a = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="""black""")
plt.legend()
plt.show()
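
# Note (our addition): the update step above is plain batch gradient descent on the
# cross-entropy cost J(theta) = mean(-y*log(h) - (1 - y)*log(1 - h)); its gradient
# with respect to theta is X^T (h - y) / m, which is exactly the `gradient` term
# computed inside logistic_reg.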
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}


class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
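
# Small usage sketch (our own example values): the two properties above expose the
# per-head width and whether rotary position embeddings apply (rotary iff no ALiBi).
if __name__ == "__main__":
    config = FalconConfig(hidden_size=4544, num_attention_heads=71, alibi=False)
    print(config.head_dim)  # 4544 // 71 == 64
    print(config.rotary)    # True, because alibi is False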
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
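
# Usage sketch (our own example, via the public `datasets` API; shown as comments
# because this module uses relative imports and cannot run stand-alone): this
# formatter is what backs `with_format("torch")`, so indexing yields torch tensors.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("torch")
#     ds[0]["x"]  # tensor([1, 2])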
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
def __lowercase ( self : Any ) -> str:
lowerCAmelCase_ : str = AutoConfig.from_pretrained("""gpt2""" )
lowerCAmelCase_ : Dict = GenerationConfig.from_model_config(lowerCamelCase )
lowerCAmelCase_ : int = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(lowerCamelCase , lowerCamelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def __lowercase ( self : Union[str, Any] ) -> Any:
lowerCAmelCase_ : Dict = GenerationConfig()
lowerCAmelCase_ : List[Any] = {
"""max_new_tokens""": 10_24,
"""foo""": """bar""",
}
lowerCAmelCase_ : Optional[Any] = copy.deepcopy(lowerCamelCase )
lowerCAmelCase_ : Tuple = generation_config.update(**lowerCamelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(lowerCamelCase , lowerCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 10_24 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(lowerCamelCase , {"""foo""": """bar"""} )
def __lowercase ( self : List[Any] ) -> List[Any]:
lowerCAmelCase_ : Union[str, Any] = GenerationConfig()
lowerCAmelCase_ : Optional[Any] = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(lowerCamelCase )
lowerCAmelCase_ : List[Any] = GenerationConfig.from_pretrained(lowerCamelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
lowerCAmelCase_ : Optional[Any] = GenerationConfig.from_model_config(lowerCamelCase )
assert not hasattr(lowerCamelCase , """foo""" ) # no new kwargs should be initialized if from config
def __lowercase ( self : Tuple ) -> str:
lowerCAmelCase_ : Tuple = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , lowerCamelCase )
self.assertEqual(default_config.num_beams , 1 )
lowerCAmelCase_ : List[Any] = GenerationConfig(
do_sample=lowerCamelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , lowerCamelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase )
lowerCAmelCase_ : Optional[Any] = GenerationConfig.from_pretrained(lowerCamelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , lowerCamelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __snake_case ( unittest.TestCase):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Optional[int] ) -> Optional[Any]:
lowerCAmelCase_ : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase )
@classmethod
def __lowercase ( cls : Optional[Any] ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def __lowercase ( self : Optional[int] ) -> int:
lowerCAmelCase_ : Tuple = GenerationConfig(
do_sample=lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
lowerCAmelCase_ : List[Any] = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase , repo_id="""test-generation-config""" , push_to_hub=lowerCamelCase , use_auth_token=self._token )
lowerCAmelCase_ : str = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
def __lowercase ( self : Optional[Any] ) -> Optional[Any]:
lowerCAmelCase_ : Dict = GenerationConfig(
do_sample=lowerCamelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
lowerCAmelCase_ : Dict = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=lowerCamelCase , use_auth_token=self._token )
lowerCAmelCase_ : Union[str, Any] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data to the closed interval [0, 1] (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit variance (z-score standardization)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
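
if __name__ == "__main__":
    # Quick sanity check (our own example values).
    print(normalization([2, 7, 10, 20, 30, 50]))    # [0.0, 0.104, 0.167, 0.375, 0.583, 1.0]
    print(standardization([2, 7, 10, 20, 30, 50]))  # zero mean, unit variance (rounded)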
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
def __init__( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str]=13 , lowerCamelCase__ : Optional[Any]=7 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : int=99 , lowerCamelCase__ : int=32 , lowerCamelCase__ : List[str]=5 , lowerCamelCase__ : Optional[Any]=4 , lowerCamelCase__ : Optional[int]=37 , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Optional[int]=16 , lowerCamelCase__ : str=2 , lowerCamelCase__ : Union[str, Any]=0.0_2 , lowerCamelCase__ : Tuple=4 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : List[Any] = batch_size
_UpperCAmelCase : Optional[int] = seq_length
_UpperCAmelCase : int = is_training
_UpperCAmelCase : Dict = use_attention_mask
_UpperCAmelCase : Optional[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : Optional[int] = vocab_size
_UpperCAmelCase : Any = hidden_size
_UpperCAmelCase : Any = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : int = hidden_dropout_prob
_UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_UpperCAmelCase : Union[str, Any] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : List[Any] = type_sequence_label_size
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : Dict = num_choices
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Dict = None
if self.use_attention_mask:
_UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : int = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = config_and_inputs
_UpperCAmelCase : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self) -> None:
        self.model_tester = FlaxAlbertModelTester(self)
@slow
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class_name.from_pretrained("albert-base-v2" )
_UpperCAmelCase : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxAlbertModel.from_pretrained("albert-base-v2" )
_UpperCAmelCase : List[Any] = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
_UpperCAmelCase : Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCAmelCase : Dict = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0]
_UpperCAmelCase : List[Any] = (1, 11, 7_68)
self.assertEqual(output.shape , lowerCamelCase__ )
_UpperCAmelCase : str = np.array(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , lowerCamelCase__ , atol=1E-4 ) )
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class snake_case ( metaclass=__UpperCAmelCase ):
"""simple docstring"""
snake_case__ = ["note_seq"]
def __init__( self : Optional[Any] ,*lowerCamelCase__ : int ,**lowerCamelCase__ : List[Any] ):
requires_backends(self ,['note_seq'] )
@classmethod
def __lowerCAmelCase ( cls : int ,*lowerCamelCase__ : List[Any] ,**lowerCamelCase__ : Tuple ):
requires_backends(cls ,['note_seq'] )
@classmethod
def __lowerCAmelCase ( cls : int ,*lowerCamelCase__ : int ,**lowerCamelCase__ : Optional[Any] ):
requires_backends(cls ,['note_seq'] )
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = BeautifulSoup(requests.get(lowerCamelCase , params=lowerCamelCase ).content , 'html.parser' )
UpperCAmelCase__ = soup.find('div' , attrs={'class': 'gs_ri'} )
UpperCAmelCase__ = div.find('div' , attrs={'class': 'gs_fl'} ).find_all('a' )
return anchors[2].get_text()
if __name__ == "__main__":
lowerCAmelCase__ : Optional[int] = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 30,
'pages': '3979-3990',
'year': 2_018,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
UpperCAmelCase__ = imread(r"digital_image_processing/image_data/lena_small.jpg")
UpperCAmelCase__ = cvtColor(img, COLOR_BGR2GRAY)
def A ( ) -> str:
'''simple docstring'''
_UpperCAmelCase = cn.convert_to_negative(_UpperCAmelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def A ( ) -> Union[str, Any]:
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_UpperCAmelCase , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def A ( ) -> Any:
'''simple docstring'''
_UpperCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_UpperCAmelCase = canny.canny(_UpperCAmelCase )
# assert canny array for at least one True
assert canny_array.any()
def A ( ) -> Dict:
'''simple docstring'''
assert gg.gaussian_filter(_UpperCAmelCase , 5 , sigma=0.9 ).all()
def A ( ) -> Dict:
'''simple docstring'''
# laplace diagonals
_UpperCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
_UpperCAmelCase = conv.img_convolve(_UpperCAmelCase , _UpperCAmelCase ).astype(_UpperCAmelCase )
assert res.any()
def A ( ) -> Union[str, Any]:
'''simple docstring'''
assert med.median_filter(_UpperCAmelCase , 3 ).any()
def A ( ) -> int:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = sob.sobel_filter(_UpperCAmelCase )
assert grad.any() and theta.any()
def A ( ) -> str:
'''simple docstring'''
_UpperCAmelCase = sp.make_sepia(_UpperCAmelCase , 20 )
assert sepia.all()
def A ( _UpperCAmelCase : str = "digital_image_processing/image_data/lena_small.jpg" ) -> str:
'''simple docstring'''
_UpperCAmelCase = bs.Burkes(imread(_UpperCAmelCase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def A ( _UpperCAmelCase : str = "digital_image_processing/image_data/lena_small.jpg" , ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = rs.NearestNeighbour(imread(_UpperCAmelCase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
_UpperCAmelCase = imread(_UpperCAmelCase , 0 )
# Test for get_neighbors_pixel function() return not None
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = image[x_coordinate][y_coordinate]
_UpperCAmelCase = lbp.get_neighbors_pixel(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_UpperCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
_UpperCAmelCase = lbp.local_binary_value(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
assert lbp_image.any()
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class TFDebertaVaModelTester:
def __init__( self : str , A : str , A : Dict=13 , A : int=7 , A : Tuple=True , A : Union[str, Any]=True , A : Any=True , A : Dict=True , A : Dict=99 , A : Tuple=32 , A : Any=2 , A : Any=4 , A : Any=37 , A : Optional[Any]="gelu" , A : List[Any]=0.1 , A : Tuple=0.1 , A : Optional[Any]=5_12 , A : Tuple=16 , A : int=2 , A : List[str]=0.0_2 , A : int=False , A : List[Any]=True , A : Optional[Any]="None" , A : Union[str, Any]=3 , A : List[str]=4 , A : List[Any]=None , ) -> int:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = relative_attention
_UpperCAmelCase = position_biased_input
_UpperCAmelCase = pos_att_type
_UpperCAmelCase = scope
def _lowerCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCAmelCase = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int] , A : Tuple , A : int , A : Any , A : List[str] , A : List[str] , A : int) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModel(config=A)
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(A)
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : str , A : Tuple , A : Tuple , A : Optional[int] , A : List[str] , A : Any , A : List[str] , A : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaForMaskedLM(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCamelCase ( self : List[Any] , A : Tuple , A : Tuple , A : Optional[int] , A : Optional[int] , A : List[Any] , A : Any , A : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFDebertaVaForSequenceClassification(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : Union[str, Any] , A : List[Any] , A : List[Any] , A : List[str] , A : Optional[Any] , A : int , A : Any , A : int) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFDebertaVaForTokenClassification(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCamelCase ( self : List[Any] , A : List[Any] , A : List[str] , A : Dict , A : Dict , A : Any , A : Tuple , A : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaForQuestionAnswering(config=A)
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(A)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFDebertaVaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCamelCase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModelTester(self)
_UpperCAmelCase = ConfigTester(self , config_class=A , hidden_size=37)
def _lowerCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A)
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A)
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A)
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A)
@slow
def _lowerCamelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
self.assertIsNotNone(A)
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason='Model not available yet')
def _lowerCamelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
pass
@slow
def _lowerCamelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge')
_UpperCAmelCase = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]])
_UpperCAmelCase = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
_UpperCAmelCase = model(A , attention_mask=A)[0]
_UpperCAmelCase = tf.constant(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]])
tf.debugging.assert_near(output[:, 1:4, 1:4] , A , atol=1E-4)
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
    args = parser.parse_args()
main(args)
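
# Example invocation (our own illustration; the script name, paths and values are
# placeholders, not taken from the original file):
#
#   python bertarize.py \
#       --pruning_method topK \
#       --threshold 0.10 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model
#
# Unless --target_model_path is given, the pruned weights are written next to the
# input folder as bertarized_<model_folder>/pytorch_model.bin.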
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class WavaVecaFeatureExtractionTester(unittest.TestCase):
def __init__( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : Optional[Any]=4_00 , UpperCAmelCase : Union[str, Any]=20_00 , UpperCAmelCase : str=1 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : List[str]=1_60_00 , UpperCAmelCase : List[str]=True , UpperCAmelCase : List[str]=True , ):
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : int = batch_size
lowerCAmelCase_ : List[str] = min_seq_length
lowerCAmelCase_ : int = max_seq_length
lowerCAmelCase_ : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase_ : Optional[int] = feature_size
lowerCAmelCase_ : Optional[int] = padding_value
lowerCAmelCase_ : List[Any] = sampling_rate
lowerCAmelCase_ : Tuple = return_attention_mask
lowerCAmelCase_ : List[Any] = do_normalize
def A ( self : Optional[Any] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def A ( self : str , UpperCAmelCase : List[Any]=False , UpperCAmelCase : List[Any]=False ):
def _flatten(UpperCAmelCase : int ):
return list(itertools.chain(*UpperCAmelCase ) )
if equal_length:
lowerCAmelCase_ : str = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase_ : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase_ : Optional[int] = [np.asarray(UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
class WavaVecaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WavaVecaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self)
def A ( self : str , UpperCAmelCase : Dict ):
self.assertTrue(np.all(np.mean(UpperCAmelCase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase , axis=0 ) - 1 ) < 1e-3 ) )
def A ( self : Dict ):
# Tests that all call wrap to encode_plus and batch_encode_plus
lowerCAmelCase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase_ : Tuple = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase_ : List[Any] = [np.asarray(UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase_ : List[str] = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
lowerCAmelCase_ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# Test batched
lowerCAmelCase_ : str = feat_extract(UpperCAmelCase , return_tensors="""np""" ).input_values
lowerCAmelCase_ : Any = feat_extract(UpperCAmelCase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase_ : Any = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
lowerCAmelCase_ : Union[str, Any] = np.asarray(UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = feat_extract(UpperCAmelCase , return_tensors="""np""" ).input_values
lowerCAmelCase_ : List[Any] = feat_extract(UpperCAmelCase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
def A ( self : Any ):
lowerCAmelCase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : Tuple = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase_ : List[str] = ["""longest""", """max_length""", """do_not_pad"""]
lowerCAmelCase_ : Optional[Any] = [None, 16_00, None]
for max_length, padding in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCAmelCase_ : Any = feat_extract(UpperCAmelCase , padding=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors="""np""" )
lowerCAmelCase_ : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def A ( self : int ):
lowerCAmelCase_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : Dict = range(8_00 , 14_00 , 2_00 )
lowerCAmelCase_ : Union[str, Any] = [floats_list((1, x) )[0] for x in lengths]
lowerCAmelCase_ : Optional[int] = ["""longest""", """max_length""", """do_not_pad"""]
lowerCAmelCase_ : Tuple = [None, 16_00, None]
for max_length, padding in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCAmelCase_ : int = feat_extract(UpperCAmelCase , max_length=UpperCAmelCase , padding=UpperCAmelCase )
lowerCAmelCase_ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def A ( self : Any ):
lowerCAmelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : List[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase_ : List[str] = feat_extract(
UpperCAmelCase , truncation=UpperCAmelCase , max_length=10_00 , padding="""max_length""" , return_tensors="""np""" )
lowerCAmelCase_ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def A ( self : Dict ):
lowerCAmelCase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : str = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase_ : List[str] = feat_extract(
UpperCAmelCase , truncation=UpperCAmelCase , max_length=10_00 , padding="""longest""" , return_tensors="""np""" )
lowerCAmelCase_ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
lowerCAmelCase_ : str = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase_ : Any = feat_extract(
UpperCAmelCase , truncation=UpperCAmelCase , max_length=20_00 , padding="""longest""" , return_tensors="""np""" )
lowerCAmelCase_ : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
@require_torch
def A ( self : str ):
import torch
lowerCAmelCase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : Optional[Any] = np.random.rand(1_00 ).astype(np.floataa )
lowerCAmelCase_ : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase_ : Optional[int] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase_ : str = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def A ( self : Tuple ):
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
lowerCAmelCase_ : Union[str, Any] = WavaVecaConfig.from_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
| 364
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger('transformers.models.speecht5')
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : str ) -> List[str]:
'''simple docstring'''
hf_model.apply_weight_norm()
lowerCAmelCase_ : Dict = checkpoint["""input_conv.weight_g"""]
lowerCAmelCase_ : Any = checkpoint["""input_conv.weight_v"""]
lowerCAmelCase_ : Any = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
lowerCAmelCase_ : Tuple = checkpoint[f'upsamples.{i}.1.weight_g']
lowerCAmelCase_ : Any = checkpoint[f'upsamples.{i}.1.weight_v']
lowerCAmelCase_ : int = checkpoint[f'upsamples.{i}.1.bias']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowerCAmelCase_ : Dict = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
lowerCAmelCase_ : Dict = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
lowerCAmelCase_ : Tuple = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
lowerCAmelCase_ : str = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
lowerCAmelCase_ : Optional[Any] = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
lowerCAmelCase_ : str = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']
lowerCAmelCase_ : str = checkpoint["""output_conv.1.weight_g"""]
lowerCAmelCase_ : Dict = checkpoint["""output_conv.1.weight_v"""]
lowerCAmelCase_ : Optional[int] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
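# Editorial note: `apply_weight_norm` re-parameterizes each convolution weight as
# weight = weight_g * weight_v / ||weight_v||, which is why the original checkpoint
# stores separate `weight_g` / `weight_v` tensors for every conv; once they are
# copied over, `remove_weight_norm` folds each (g, v) pair back into a single weight.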
@torch.no_grad()
def __UpperCamelCase ( lowercase__ : str , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : List[Any]=None , lowercase__ : Union[str, Any]=None , ) -> List[Any]:
'''simple docstring'''
if config_path is not None:
lowerCAmelCase_ : Optional[Any] = SpeechTaHifiGanConfig.from_pretrained(lowercase__ )
else:
lowerCAmelCase_ : Any = SpeechTaHifiGanConfig()
lowerCAmelCase_ : str = SpeechTaHifiGan(lowercase__ )
lowerCAmelCase_ : Tuple = torch.load(lowercase__ )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase__ , lowercase__ )
lowerCAmelCase_ : Optional[int] = np.load(lowercase__ )
lowerCAmelCase_ : Any = stats[0].reshape(-1 )
lowerCAmelCase_ : List[str] = stats[1].reshape(-1 )
lowerCAmelCase_ : Optional[int] = torch.from_numpy(lowercase__ ).float()
lowerCAmelCase_ : Any = torch.from_numpy(lowercase__ ).float()
model.save_pretrained(lowercase__ )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
__UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 28
| 0
|
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def snake_case_ ( _lowerCAmelCase : List[str] ) -> Tuple:
def wrapper(*_lowerCAmelCase : Any , **_lowerCAmelCase : Union[str, Any] ):
UpperCAmelCase : Any = timeit.default_timer()
UpperCAmelCase : int = func(*__A , **__A )
UpperCAmelCase : Union[str, Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase : Optional[Any] = func.__name__
return wrapper
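# Minimal usage sketch for the timing decorator above (illustrative only, not part
# of the benchmark):
#
#   @snake_case_
#   def write_batch():
#       ...
#
# Calling write_batch() then returns the elapsed wall-clock time in seconds
# instead of the wrapped function's own result.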
def snake_case_ ( _lowerCAmelCase : dict , _lowerCAmelCase : str=100 , _lowerCAmelCase : Dict=None ) -> str:
UpperCAmelCase : List[Any] = []
UpperCAmelCase : Tuple = seq_shapes or {}
for i in range(__A ):
UpperCAmelCase : List[Any] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__A , _ArrayXD ):
UpperCAmelCase : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__A , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase : List[Any] = '''The small grey turtle was surprisingly fast when challenged.'''
else:
UpperCAmelCase : Optional[Any] = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(__A , datasets.Sequence ):
while isinstance(__A , datasets.Sequence ):
UpperCAmelCase : Optional[Any] = v.feature
UpperCAmelCase : Optional[int] = seq_shapes[k]
UpperCAmelCase : Union[str, Any] = np.random.rand(*__A ).astype(v.dtype )
UpperCAmelCase : List[str] = data
dummy_data.append((i, example) )
return dummy_data
def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict=100 , _lowerCAmelCase : str=None ) -> Optional[Any]:
UpperCAmelCase : List[Any] = generate_examples(__A , num_examples=__A , seq_shapes=__A )
with ArrowWriter(features=__A , path=__A ) as writer:
for key, record in dummy_data:
UpperCAmelCase : Union[str, Any] = features.encode_example(__A )
writer.write(__A )
UpperCAmelCase , UpperCAmelCase : str = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
UpperCAmelCase : Dict = datasets.Dataset.from_file(filename=__A , info=datasets.DatasetInfo(features=__A ) )
return dataset
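# Hypothetical usage sketch (the file path and feature spec below are illustrative
# assumptions, not part of the benchmark):
#
#   features = datasets.Features({"text": datasets.Value("string")})
#   dataset = snake_case_("/tmp/bench.arrow", features, 100)  # last definition above
#
# This writes 100 synthetic examples through ArrowWriter, verifies the written
# count, and reloads the file as a datasets.Dataset.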
| 23
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
snake_case_ : Dict = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Tuple = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : str = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[Any] = [
"MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"MBartForCausalLM",
"MBartForConditionalGeneration",
"MBartForQuestionAnswering",
"MBartForSequenceClassification",
"MBartModel",
"MBartPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Any = [
"TFMBartForConditionalGeneration",
"TFMBartModel",
"TFMBartPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : List[str] = [
"FlaxMBartForConditionalGeneration",
"FlaxMBartForQuestionAnswering",
"FlaxMBartForSequenceClassification",
"FlaxMBartModel",
"FlaxMBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
snake_case_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 51
| 0
|
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowercase__ ( _UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any = fname.split(os.path.sep )[-1]
return re.search(R'^(.*)_\d+\.jpg$' , _UpperCAmelCase ).groups()[0]
class a__ ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : str, lowerCAmelCase : Dict, lowerCAmelCase : List[Any]=None, lowerCAmelCase : Optional[Any]=None ) -> Any:
lowercase : Dict = file_names
lowercase : str = image_transform
lowercase : Union[str, Any] = label_to_id
def __len__( self : Tuple ) -> str:
return len(self.file_names )
def __getitem__( self : str, lowerCAmelCase : Union[str, Any] ) -> Optional[Any]:
lowercase : Any = self.file_names[idx]
lowercase : int = PIL.Image.open(lowerCAmelCase )
lowercase : int = raw_image.convert('RGB' )
if self.image_transform is not None:
lowercase : Union[str, Any] = self.image_transform(lowerCAmelCase )
lowercase : List[Any] = extract_label(lowerCAmelCase )
if self.label_to_id is not None:
lowercase : Optional[Any] = self.label_to_id[label]
return {"image": image, "label": label}
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
'''simple docstring'''
if args.with_tracking:
lowercase : int = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
lowercase : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase : Tuple = config['lr']
lowercase : int = int(config['num_epochs'] )
lowercase : Union[str, Any] = int(config['seed'] )
lowercase : Optional[int] = int(config['batch_size'] )
lowercase : Dict = config['image_size']
if not isinstance(_UpperCAmelCase , (list, tuple) ):
lowercase : Optional[int] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
lowercase : Optional[Any] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
lowercase : List[Any] = int(args.checkpointing_steps )
else:
raise ValueError(
f'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
lowercase : Union[str, Any] = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
lowercase : Dict = os.path.split(_UpperCAmelCase )[-1].split('.' )[0]
accelerator.init_trackers(_UpperCAmelCase , _UpperCAmelCase )
# Grab all the image filenames
lowercase : str = [os.path.join(args.data_dir , _UpperCAmelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
lowercase : Optional[int] = [extract_label(_UpperCAmelCase ) for fname in file_names]
lowercase : Dict = list(set(_UpperCAmelCase ) )
id_to_label.sort()
lowercase : Tuple = {lbl: i for i, lbl in enumerate(_UpperCAmelCase )}
# Set the seed before splitting the data.
np.random.seed(_UpperCAmelCase )
torch.manual_seed(_UpperCAmelCase )
torch.cuda.manual_seed_all(_UpperCAmelCase )
# Split our filenames between train and validation
lowercase : Optional[Any] = np.random.permutation(len(_UpperCAmelCase ) )
lowercase : int = int(0.8 * len(_UpperCAmelCase ) )
lowercase : str = random_perm[:cut]
lowercase : Optional[int] = random_perm[cut:]
# For training we use a simple RandomResizedCrop
lowercase : List[Any] = Compose([RandomResizedCrop(_UpperCAmelCase , scale=(0.5, 1.0) ), ToTensor()] )
lowercase : Any = PetsDataset(
[file_names[i] for i in train_split] , image_transform=_UpperCAmelCase , label_to_id=_UpperCAmelCase )
# For evaluation, we use a deterministic Resize
lowercase : Any = Compose([Resize(_UpperCAmelCase ), ToTensor()] )
lowercase : Union[str, Any] = PetsDataset([file_names[i] for i in eval_split] , image_transform=_UpperCAmelCase , label_to_id=_UpperCAmelCase )
# Instantiate dataloaders.
lowercase : List[Any] = DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , batch_size=_UpperCAmelCase , num_workers=4 )
lowercase : Optional[int] = DataLoader(_UpperCAmelCase , shuffle=_UpperCAmelCase , batch_size=_UpperCAmelCase , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase : Any = create_model('resnet50d' , pretrained=_UpperCAmelCase , num_classes=len(_UpperCAmelCase ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase : Dict = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
lowercase : Dict = False
for param in model.get_classifier().parameters():
lowercase : Union[str, Any] = True
# We normalize the batches of images to be a bit faster.
lowercase : List[str] = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
lowercase : Tuple = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
lowercase : Dict = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
lowercase : Any = OneCycleLR(optimizer=_UpperCAmelCase , max_lr=_UpperCAmelCase , epochs=_UpperCAmelCase , steps_per_epoch=len(_UpperCAmelCase ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# We need to keep track of how many total steps we have iterated over
lowercase : Optional[Any] = 0
# We also need to keep track of the starting epoch so files are named properly
lowercase : int = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
lowercase : Optional[int] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
lowercase : Tuple = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
lowercase : Dict = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
lowercase : Optional[Any] = os.path.splitext(_UpperCAmelCase )[0]
if "epoch" in training_difference:
lowercase : Optional[int] = int(training_difference.replace('epoch_' , '' ) ) + 1
lowercase : Tuple = None
else:
lowercase : str = int(training_difference.replace('step_' , '' ) )
lowercase : Optional[int] = resume_step // len(_UpperCAmelCase )
resume_step -= starting_epoch * len(_UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase , _UpperCAmelCase ):
model.train()
if args.with_tracking:
lowercase : List[str] = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
lowercase : Union[str, Any] = accelerator.skip_first_batches(_UpperCAmelCase , _UpperCAmelCase )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
lowercase : Tuple = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowercase : Dict = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowercase : int = (batch['image'] - mean) / std
lowercase : str = model(_UpperCAmelCase )
lowercase : Dict = torch.nn.functional.cross_entropy(_UpperCAmelCase , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase : List[str] = f'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
lowercase : Union[str, Any] = os.path.join(args.output_dir , _UpperCAmelCase )
accelerator.save_state(_UpperCAmelCase )
model.eval()
lowercase : Union[str, Any] = 0
lowercase : List[str] = 0
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowercase : Dict = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowercase : Dict = (batch['image'] - mean) / std
with torch.no_grad():
lowercase : int = model(_UpperCAmelCase )
lowercase : List[Any] = outputs.argmax(dim=-1 )
lowercase , lowercase : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['label']) )
lowercase : List[str] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
lowercase : Optional[int] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}: {1_00 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'accuracy': 1_00 * eval_metric,
'train_loss': total_loss.item() / len(_UpperCAmelCase ),
'epoch': epoch,
} , step=_UpperCAmelCase , )
if checkpointing_steps == "epoch":
lowercase : int = f'''epoch_{epoch}'''
if args.output_dir is not None:
lowercase : Dict = os.path.join(args.output_dir , _UpperCAmelCase )
accelerator.save_state(_UpperCAmelCase )
if args.with_tracking:
accelerator.end_training()
def lowercase__ ( ) -> Dict:
'''simple docstring'''
lowercase : Any = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=_UpperCAmelCase , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose '
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 '
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=_UpperCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=_UpperCAmelCase , default='logs' , help='Location of where to store experiment tracking logs and relevant project information' , )
lowercase : Optional[Any] = parser.parse_args()
lowercase : Tuple = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 2_24}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 53
|
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_UpperCamelCase: Any = logging.get_logger(__name__)
class a__ ( SCREAMING_SNAKE_CASE__ ):
_lowerCamelCase = ['pixel_values']
def __init__( self : Tuple, lowerCAmelCase : bool = True, lowerCAmelCase : Union[int, float] = 1 / 255, lowerCAmelCase : bool = True, lowerCAmelCase : int = 8, **lowerCAmelCase : Optional[int], ) -> None:
super().__init__(**lowerCAmelCase )
lowercase : Dict = do_rescale
lowercase : Tuple = rescale_factor
lowercase : List[str] = do_pad
lowercase : int = pad_size
def lowercase ( self : List[Any], lowerCAmelCase : np.ndarray, lowerCAmelCase : float, lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCAmelCase : int ) -> np.ndarray:
return rescale(lowerCAmelCase, scale=lowerCAmelCase, data_format=lowerCAmelCase, **lowerCAmelCase )
def lowercase ( self : Union[str, Any], lowerCAmelCase : np.ndarray, lowerCAmelCase : int, lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None ) -> List[Any]:
lowercase , lowercase : Tuple = get_image_size(lowerCAmelCase )
lowercase : Optional[Any] = (old_height // size + 1) * size - old_height
lowercase : Dict = (old_width // size + 1) * size - old_width
return pad(lowerCAmelCase, ((0, pad_height), (0, pad_width)), mode='symmetric', data_format=lowerCAmelCase )
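# Worked example of the padding arithmetic above: old_height = 17 with size = 8 gives
# pad_height = (17 // 8 + 1) * 8 - 17 = 7, i.e. pad up to 24, the next multiple of 8.
# Note the formula adds a full extra block when the dimension is already a multiple:
# old_height = 16 gives pad_height = (16 // 8 + 1) * 8 - 16 = 8.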
def lowercase ( self : Any, lowerCAmelCase : ImageInput, lowerCAmelCase : Optional[bool] = None, lowerCAmelCase : Optional[float] = None, lowerCAmelCase : Optional[bool] = None, lowerCAmelCase : Optional[int] = None, lowerCAmelCase : Optional[Union[str, TensorType]] = None, lowerCAmelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST, **lowerCAmelCase : Any, ) -> List[Any]:
lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Any = do_pad if do_pad is not None else self.do_pad
lowercase : int = pad_size if pad_size is not None else self.pad_size
lowercase : Tuple = make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
lowercase : Dict = [to_numpy_array(lowerCAmelCase ) for image in images]
if do_rescale:
lowercase : Optional[int] = [self.rescale(image=lowerCAmelCase, scale=lowerCAmelCase ) for image in images]
if do_pad:
lowercase : List[str] = [self.pad(lowerCAmelCase, size=lowerCAmelCase ) for image in images]
lowercase : Optional[int] = [to_channel_dimension_format(lowerCAmelCase, lowerCAmelCase ) for image in images]
lowercase : Tuple = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase, tensor_type=lowerCAmelCase )
| 53
| 1
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def _a ( SCREAMING_SNAKE_CASE : dict ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def _a ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ) -> XGBClassifier:
"""simple docstring"""
__lowerCAmelCase: int = XGBClassifier()
classifier.fit(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return classifier
def _a ( ) -> None:
"""simple docstring"""
__lowerCAmelCase: Any = load_iris()
__lowerCAmelCase , __lowerCAmelCase: Tuple = data_handling(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: int = train_test_split(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , test_size=0.2_5 )
__lowerCAmelCase: Optional[Any] = iris['target_names']
# Create an XGBoost Classifier from the training data
__lowerCAmelCase: List[str] = xgboost(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , display_labels=SCREAMING_SNAKE_CASE , cmap='Blues' , normalize='true' , )
plt.title('Normalized Confusion Matrix - IRIS Dataset' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 322
|
def _a ( SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase: str = len(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: List[Any] = sum(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: str = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__lowerCAmelCase: Tuple = True
for i in range(1 , s + 1 ):
__lowerCAmelCase: Any = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__lowerCAmelCase: Optional[int] = dp[i][j - 1]
if arr[i - 1] <= j:
__lowerCAmelCase: Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__lowerCAmelCase: Tuple = s - 2 * j
break
return diff
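# Worked example for the subset-sum DP above (comments only): arr = [1, 6, 11, 5]
# has total s = 23; scanning j from s // 2 = 11 downward, the first reachable sum
# is j = 11 (subset {11}), so the minimum partition difference is
# s - 2 * j = 23 - 22 = 1, e.g. {11} = 11 versus {1, 5, 6} = 12.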
| 322
| 1
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__a :Optional[int] = TypeVar('T')
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
return (position - 1) // 2
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
return (2 * position) + 1
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
return (2 * position) + 2
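# Index arithmetic for the 0-indexed array heap (worked example): position 4 has
# parent (4 - 1) // 2 = 1, left child 2 * 4 + 1 = 9 and right child 2 * 4 + 2 = 10.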
class _a ( Generic[T] ):
"""simple docstring"""
def __init__( self : int ):
A_ = []
A_ = {}
A_ = 0
def __len__( self : Tuple ):
return self.elements
def __repr__( self : Union[str, Any] ):
return str(self.heap )
def __A ( self : List[str] ):
# Check if the priority queue is empty
return self.elements == 0
def __A ( self : int , UpperCAmelCase : T , UpperCAmelCase : int ):
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
A_ = self.elements
self.elements += 1
self._bubble_up(UpperCAmelCase )
def __A ( self : str ):
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
A_ , A_ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
A_ , A_ = self.heap[0]
self._bubble_down(UpperCAmelCase )
return elem
def __A ( self : Tuple , UpperCAmelCase : T , UpperCAmelCase : int ):
# Update the weight of the given key
A_ = self.position_map[elem]
A_ = (elem, weight)
if position > 0:
A_ = get_parent_position(UpperCAmelCase )
A_ , A_ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(UpperCAmelCase )
else:
self._bubble_down(UpperCAmelCase )
else:
self._bubble_down(UpperCAmelCase )
def __A ( self : Optional[int] , UpperCAmelCase : T ):
# Place a node at the proper position (upward movement) [to be used internally
# only]
A_ = self.position_map[elem]
if curr_pos == 0:
return None
A_ = get_parent_position(UpperCAmelCase )
A_ , A_ = self.heap[curr_pos]
A_ , A_ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(UpperCAmelCase , UpperCAmelCase )
return self._bubble_up(UpperCAmelCase )
return None
def __A ( self : str , UpperCAmelCase : T ):
# Place a node at the proper position (downward movement) [to be used
# internally only]
A_ = self.position_map[elem]
A_ , A_ = self.heap[curr_pos]
A_ = get_child_left_position(UpperCAmelCase )
A_ = get_child_right_position(UpperCAmelCase )
if child_left_position < self.elements and child_right_position < self.elements:
A_ , A_ = self.heap[child_left_position]
A_ , A_ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(UpperCAmelCase , UpperCAmelCase )
return self._bubble_down(UpperCAmelCase )
if child_left_position < self.elements:
A_ , A_ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(UpperCAmelCase , UpperCAmelCase )
return self._bubble_down(UpperCAmelCase )
else:
return None
if child_right_position < self.elements:
A_ , A_ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(UpperCAmelCase , UpperCAmelCase )
return self._bubble_down(UpperCAmelCase )
return None
def __A ( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int ):
# Swap the nodes at the given positions
A_ = self.heap[nodea_pos][0]
A_ = self.heap[nodea_pos][0]
A_ , A_ = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
A_ = nodea_pos
A_ = nodea_pos
class _a ( Generic[T] ):
"""simple docstring"""
def __init__( self : Dict ):
A_ = {}
A_ = 0
def __repr__( self : Tuple ):
return str(self.connections )
def __len__( self : List[str] ):
return self.nodes
def __A ( self : Optional[int] , UpperCAmelCase : T ):
# Add a node in the graph if it is not in the graph
if node not in self.connections:
A_ = {}
self.nodes += 1
def __A ( self : Any , UpperCAmelCase : T , UpperCAmelCase : T , UpperCAmelCase : int ):
# Add an edge between 2 nodes in the graph
self.add_node(UpperCAmelCase )
self.add_node(UpperCAmelCase )
A_ = weight
A_ = weight
def __snake_case ( __UpperCamelCase : GraphUndirectedWeighted[T] ,):
"""simple docstring"""
A_ = {node: maxsize for node in graph.connections}
A_ = {node: None for node in graph.connections}
A_ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__UpperCamelCase ,__UpperCamelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
A_ = priority_queue.extract_min()
A_ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
A_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__UpperCamelCase ,dist[neighbour] )
A_ = node
# running prim's algorithm
while not priority_queue.is_empty():
A_ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
A_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__UpperCamelCase ,dist[neighbour] )
A_ = node
return dist, parent
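# Hypothetical usage sketch for Prim's algorithm above (edge weights are illustrative
# assumptions; method and class names follow the original, pre-obfuscation API, with
# the graph class typed as GraphUndirectedWeighted in the signature above):
#
#   graph: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
#   graph.add_edge("a", "b", 3)
#   graph.add_edge("b", "c", 10)
#   graph.add_edge("a", "c", 15)
#   dist, parent = __snake_case(graph)  # last module-level definition above
#
# `dist` maps each node to the weight of the edge attaching it to the minimum
# spanning tree and `parent` to the node it attaches through.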
| 329
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
__a :int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
__a :Any = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print('\n'.join(upper_files) + '\n')
__a :Tuple = [file for file in filepaths if ' ' in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print('\n'.join(space_files) + '\n')
__a :str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print('\n'.join(hyphen_files) + '\n')
__a :List[str] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print('\n'.join(nodir_files) + '\n')
__a :Any = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 329
| 1
|
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
lowercase__ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple:
for attribute in key.split('.' ):
a__: List[str] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if weight_type is not None:
a__: Tuple = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).shape
else:
a__: Tuple = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
a__: Dict = value
elif weight_type == "weight_g":
a__: Union[str, Any] = value
elif weight_type == "weight_v":
a__: Union[str, Any] = value
elif weight_type == "bias":
a__: Optional[Any] = value
else:
a__: Any = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
a__: int = []
a__: Optional[Any] = fairseq_model.state_dict()
a__: str = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
a__: str = False
if "conv_layers" in name:
load_conv_layer(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
a__: int = True
else:
for key, mapped_key in MAPPING.items():
a__: int = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
a__: Dict = True
if "*" in mapped_key:
a__: Union[str, Any] = name.split(_SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
a__: Optional[int] = mapped_key.replace('*' , _SCREAMING_SNAKE_CASE )
if "weight_g" in name:
a__: List[str] = 'weight_g'
elif "weight_v" in name:
a__: List[Any] = 'weight_v'
elif "bias" in name:
a__: Any = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
a__: List[Any] = 'weight'
else:
a__: List[Any] = None
set_recursively(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(_SCREAMING_SNAKE_CASE )
logger.warning(F'Unused weights: {unused_weights}' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Dict:
a__: int = full_name.split('conv_layers.' )[-1]
a__: List[str] = name.split('.' )
a__: List[Any] = int(items[0] )
a__: Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
a__: List[Any] = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
a__: Any = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.' )
a__: Union[str, Any] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' )
a__: List[Any] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True ) ->Optional[int]:
if config_path is not None:
a__: int = UniSpeechSatConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
else:
a__: Dict = UniSpeechSatConfig()
a__: Any = ''
if is_finetuned:
a__: str = UniSpeechSatForCTC(_SCREAMING_SNAKE_CASE )
else:
a__: Union[str, Any] = UniSpeechSatForPreTraining(_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
a__: int = model[0].eval()
recursively_load_weights(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
lowercase__ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 290
|
"""simple docstring"""
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
while a != 0:
a__ , a__: List[str] = b % a, a
return b
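# Intended trace of the Euclid loop above: gcd(24, 40) steps through
# (a, b) = (24, 40) -> (16, 24) -> (8, 16) -> (0, 8) and returns 8.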
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->int:
if gcd(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) != 1:
a__: Dict = F'mod inverse of {a!r} and {m!r} does not exist'
raise ValueError(_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: Union[str, Any] = 1, 0, a
a__ , a__ , a__: Any = 0, 1, m
while va != 0:
a__: int = ua // va
a__ , a__ , a__ , a__ , a__ , a__: Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
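# Worked example: the modular inverse of 3 mod 11 is 4, since 3 * 4 = 12 ≡ 1 (mod 11);
# gcd(3, 11) == 1, so the inverse exists and the extended-Euclid loop above returns it.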
| 290
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class A_ ( _a ):
lowerCAmelCase__ = 'megatron-bert'
def __init__( self: Any ,__lowerCAmelCase: List[Any]=29_056 ,__lowerCAmelCase: Optional[Any]=1_024 ,__lowerCAmelCase: int=24 ,__lowerCAmelCase: Union[str, Any]=16 ,__lowerCAmelCase: Optional[int]=4_096 ,__lowerCAmelCase: str="gelu" ,__lowerCAmelCase: int=0.1 ,__lowerCAmelCase: Tuple=0.1 ,__lowerCAmelCase: List[Any]=512 ,__lowerCAmelCase: Union[str, Any]=2 ,__lowerCAmelCase: str=0.02 ,__lowerCAmelCase: List[str]=1e-12 ,__lowerCAmelCase: List[str]=0 ,__lowerCAmelCase: str="absolute" ,__lowerCAmelCase: Union[str, Any]=True ,**__lowerCAmelCase: Dict ,):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCAmelCase ,**__lowerCAmelCase )
_lowerCamelCase : Dict = vocab_size
_lowerCamelCase : Union[str, Any] = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : List[Any] = hidden_act
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : Optional[int] = hidden_dropout_prob
_lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCamelCase : Any = max_position_embeddings
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : List[Any] = layer_norm_eps
_lowerCamelCase : int = position_embedding_type
_lowerCamelCase : Tuple = use_cache
| 340
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : int = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False ) -> List[str]:
'''simple docstring'''
_lowerCamelCase : Tuple = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("head" ):
_lowerCamelCase : Tuple = "segformer.encoder." + key
if key.startswith("backbone" ):
_lowerCamelCase : Any = key.replace("backbone" , "segformer.encoder" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_lowerCamelCase : int = key[key.find("patch_embed" ) + len("patch_embed" )]
_lowerCamelCase : int = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowerCamelCase )-1}""" )
if "norm" in key:
_lowerCamelCase : Optional[Any] = key.replace("norm" , "layer_norm" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_lowerCamelCase : Dict = key[key.find("segformer.encoder.layer_norm" ) + len("segformer.encoder.layer_norm" )]
_lowerCamelCase : Tuple = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowerCamelCase )-1}""" )
if "layer_norm1" in key:
_lowerCamelCase : Union[str, Any] = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
_lowerCamelCase : int = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
_lowerCamelCase : Union[str, Any] = key[key.find("block" ) + len("block" )]
_lowerCamelCase : Optional[Any] = key.replace(F"""block{idx}""" , F"""block.{int(_lowerCamelCase )-1}""" )
if "attn.q" in key:
_lowerCamelCase : Optional[int] = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
_lowerCamelCase : List[str] = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
_lowerCamelCase : Tuple = key.replace("attn" , "attention.self" )
if "fc1" in key:
_lowerCamelCase : Optional[Any] = key.replace("fc1" , "dense1" )
if "fc2" in key:
_lowerCamelCase : Dict = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
_lowerCamelCase : int = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
_lowerCamelCase : str = key.replace("linear_fuse.conv" , "linear_fuse" )
_lowerCamelCase : Optional[Any] = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_lowerCamelCase : Union[str, Any] = key[key.find("linear_c" ) + len("linear_c" )]
_lowerCamelCase : Optional[int] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowerCamelCase )-1}""" )
if key.startswith("head" ):
_lowerCamelCase : List[str] = key.replace("head" , "classifier" )
_lowerCamelCase : Union[str, Any] = value
return new_state_dict
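# Example of the renaming above (illustrative segmentation-checkpoint key):
# "backbone.patch_embed1.proj.weight" -> "segformer.encoder.patch_embed1.proj.weight"
# -> "segformer.encoder.patch_embeddings.0.proj.weight" (patch_embed index shifts to 0-based).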
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.weight""" )
_lowerCamelCase : Optional[Any] = state_dict.pop(F"""segformer.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
_lowerCamelCase : int = kv_weight[
: config.hidden_sizes[i], :
]
_lowerCamelCase : int = kv_bias[: config.hidden_sizes[i]]
_lowerCamelCase : Optional[int] = kv_weight[
config.hidden_sizes[i] :, :
]
_lowerCamelCase : Optional[Any] = kv_bias[
config.hidden_sizes[i] :
]
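# The fused "kv" projection stacks key and value parameters along dim 0: rows
# [: hidden_sizes[i]] become the key projection and rows [hidden_sizes[i] :] the
# value projection, which is exactly the slicing performed above.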
def lowerCamelCase_( ) -> Dict:
'''simple docstring'''
_lowerCamelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCamelCase : Union[str, Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Dict:
'''simple docstring'''
_lowerCamelCase : Any = SegformerConfig()
_lowerCamelCase : int = False
# set attributes based on model_name
_lowerCamelCase : Any = "huggingface/label-files"
if "segformer" in model_name:
_lowerCamelCase : str = model_name[len("segformer." ) : len("segformer." ) + 2]
if "ade" in model_name:
_lowerCamelCase : str = 150
_lowerCamelCase : Dict = "ade20k-id2label.json"
_lowerCamelCase : Dict = (1, 150, 128, 128)
elif "city" in model_name:
_lowerCamelCase : List[str] = 19
_lowerCamelCase : Tuple = "cityscapes-id2label.json"
_lowerCamelCase : Tuple = (1, 19, 128, 128)
else:
raise ValueError(F"""Model {model_name} not supported""" )
elif "mit" in model_name:
_lowerCamelCase : List[str] = True
_lowerCamelCase : Tuple = model_name[4:6]
_lowerCamelCase : Tuple = 1000
_lowerCamelCase : List[Any] = "imagenet-1k-id2label.json"
_lowerCamelCase : List[Any] = (1, 1000)
else:
raise ValueError(F"""Model {model_name} not supported""" )
# set config attributes
_lowerCamelCase : Optional[Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
_lowerCamelCase : List[str] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[Any] = idalabel
_lowerCamelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
_lowerCamelCase : int = [64, 128, 320, 512]
_lowerCamelCase : int = 256
elif size == "b2":
_lowerCamelCase : Tuple = [64, 128, 320, 512]
_lowerCamelCase : List[Any] = 768
_lowerCamelCase : Any = [3, 4, 6, 3]
elif size == "b3":
_lowerCamelCase : Tuple = [64, 128, 320, 512]
_lowerCamelCase : Union[str, Any] = 768
_lowerCamelCase : Optional[Any] = [3, 4, 18, 3]
elif size == "b4":
_lowerCamelCase : str = [64, 128, 320, 512]
_lowerCamelCase : Optional[Any] = 768
_lowerCamelCase : Dict = [3, 8, 27, 3]
elif size == "b5":
_lowerCamelCase : int = [64, 128, 320, 512]
_lowerCamelCase : Tuple = 768
_lowerCamelCase : Tuple = [3, 6, 40, 3]
else:
raise ValueError(F"""Size {size} not supported""" )
# load image processor (only resize + normalize)
_lowerCamelCase : Dict = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_lowerCamelCase , align=_lowerCamelCase , do_random_crop=_lowerCamelCase )
# prepare image
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Dict = image_processor(images=_lowerCamelCase , return_tensors="pt" ).pixel_values
logger.info(F"""Converting model {model_name}...""" )
# load original state dict
if encoder_only:
_lowerCamelCase : Tuple = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )
else:
_lowerCamelCase : int = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )["state_dict"]
# rename keys
_lowerCamelCase : str = rename_keys(_lowerCamelCase , encoder_only=_lowerCamelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(_lowerCamelCase , _lowerCamelCase )
# create HuggingFace model and load state dict
if encoder_only:
_lowerCamelCase : Tuple = False
_lowerCamelCase : Optional[int] = SegformerForImageClassification(_lowerCamelCase )
else:
_lowerCamelCase : List[str] = SegformerForSemanticSegmentation(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
# forward pass
_lowerCamelCase : Any = model(_lowerCamelCase )
_lowerCamelCase : Dict = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
_lowerCamelCase : str = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
_lowerCamelCase : Any = torch.tensor(
[
[[-7.5_8_2_0, -8.7_2_3_1, -8.3_2_1_5], [-8.0_6_0_0, -1_0.3_5_2_9, -1_0.0_3_0_4], [-7.5_2_0_8, -9.4_1_0_3, -9.6_2_3_9]],
[[-1_2.6_9_1_8, -1_3.8_9_9_4, -1_3.7_1_3_7], [-1_3.3_1_9_6, -1_5.7_5_2_3, -1_5.4_7_8_9], [-1_2.9_3_4_3, -1_4.8_7_5_7, -1_4.9_6_8_9]],
[[-1_1.1_9_1_1, -1_1.9_4_2_1, -1_1.3_2_4_3], [-1_1.3_3_4_2, -1_3.6_8_3_9, -1_3.3_5_8_1], [-1_0.3_9_0_9, -1_2.1_8_3_2, -1_2.4_8_5_8]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
_lowerCamelCase : int = torch.tensor(
[
[[-1_1.8_1_7_3, -1_4.3_8_5_0, -1_6.3_1_2_8], [-1_4.5_6_4_8, -1_6.5_8_0_4, -1_8.6_5_6_8], [-1_4.7_2_2_3, -1_5.7_3_8_7, -1_8.4_2_1_8]],
[[-1_5.7_2_9_0, -1_7.9_1_7_1, -1_9.4_4_2_3], [-1_8.3_1_0_5, -1_9.9_4_4_8, -2_1.4_6_6_1], [-1_7.9_2_9_6, -1_8.6_4_9_7, -2_0.7_9_1_0]],
[[-1_5.0_7_8_3, -1_7.0_3_3_6, -1_8.2_7_8_9], [-1_6.8_7_7_1, -1_8.6_8_7_0, -2_0.1_6_1_2], [-1_6.2_4_5_4, -1_7.1_4_2_6, -1_9.5_0_5_5]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
_lowerCamelCase : Optional[Any] = torch.tensor(
[
[[-9.0_8_7_8, -1_0.2_0_8_1, -1_0.1_8_9_1], [-9.3_1_4_4, -1_0.7_9_4_1, -1_0.9_8_4_3], [-9.2_2_9_4, -1_0.3_8_5_5, -1_0.5_7_0_4]],
[[-1_2.2_3_1_6, -1_3.9_0_6_8, -1_3.6_1_0_2], [-1_2.9_1_6_1, -1_4.3_7_0_2, -1_4.3_2_3_5], [-1_2.5_2_3_3, -1_3.7_1_7_4, -1_3.7_9_3_2]],
[[-1_4.6_2_7_5, -1_5.2_4_9_0, -1_4.9_7_2_7], [-1_4.3_4_0_0, -1_5.9_6_8_7, -1_6.2_8_2_7], [-1_4.1_4_8_4, -1_5.4_0_3_3, -1_5.8_9_3_7]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
_lowerCamelCase : List[str] = torch.tensor(
[
[[-1_2.3_1_4_4, -1_3.2_4_4_7, -1_4.0_8_0_2], [-1_3.3_6_1_4, -1_4.5_8_1_6, -1_5.6_1_1_7], [-1_3.3_3_4_0, -1_4.4_4_3_3, -1_6.2_2_1_9]],
[[-1_9.2_7_8_1, -2_0.4_1_2_8, -2_0.7_5_0_6], [-2_0.6_1_5_3, -2_1.6_5_6_6, -2_2.0_9_9_8], [-1_9.9_8_0_0, -2_1.0_4_3_0, -2_2.1_4_9_4]],
[[-1_8.8_7_3_9, -1_9.7_8_0_4, -2_1.1_8_3_4], [-2_0.1_2_3_3, -2_1.6_7_6_5, -2_3.2_9_4_4], [-2_0.0_3_1_5, -2_1.2_6_4_1, -2_3.6_9_4_4]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
_lowerCamelCase : Any = torch.tensor(
[
[[-9.5_5_2_4, -1_2.0_8_3_5, -1_1.7_3_4_8], [-1_0.5_2_2_9, -1_3.6_4_4_6, -1_4.5_6_6_2], [-9.5_8_4_2, -1_2.8_8_5_1, -1_3.9_4_1_4]],
[[-1_5.3_4_3_2, -1_7.5_3_2_3, -1_7.0_8_1_8], [-1_6.3_3_3_0, -1_8.9_2_5_5, -1_9.2_1_0_1], [-1_5.1_3_4_0, -1_7.7_8_4_8, -1_8.3_9_7_1]],
[[-1_2.6_0_7_2, -1_4.9_4_8_6, -1_4.6_6_3_1], [-1_3.7_6_2_9, -1_7.0_9_0_7, -1_7.7_7_4_5], [-1_2.7_8_9_9, -1_6.1_6_9_5, -1_7.1_6_7_1]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
_lowerCamelCase : Dict = torch.tensor(
[
[[-1_1.9_2_9_5, -1_3.4_0_5_7, -1_4.8_1_0_6], [-1_3.3_4_3_1, -1_4.8_1_7_9, -1_5.3_7_8_1], [-1_4.2_8_3_6, -1_5.5_9_4_2, -1_6.1_5_8_8]],
[[-1_1.4_9_0_6, -1_2.8_0_6_7, -1_3.6_5_6_4], [-1_3.1_1_8_9, -1_4.0_5_0_0, -1_4.1_5_4_3], [-1_3.8_7_4_8, -1_4.5_1_3_6, -1_4.8_7_8_9]],
[[0.5_3_7_4, 0.1_0_6_7, -0.4_7_4_2], [0.1_1_4_1, -0.2_2_5_5, -0.7_0_9_9], [-0.3_0_0_0, -0.5_9_2_4, -1.3_1_0_5]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
_lowerCamelCase : Optional[int] = torch.tensor(
[
[[-7.8_2_1_7, -9.8_7_6_7, -1_0.1_7_1_7], [-9.4_4_3_8, -1_0.9_0_5_8, -1_1.4_0_4_7], [-9.7_9_3_9, -1_2.3_4_9_5, -1_2.1_0_7_9]],
[[-7.1_5_1_4, -9.5_3_3_6, -1_0.0_8_6_0], [-9.7_7_7_6, -1_1.6_8_2_2, -1_1.8_4_3_9], [-1_0.1_4_1_1, -1_2.7_6_5_5, -1_2.8_9_7_2]],
[[0.3_0_2_1, 0.0_8_0_5, -0.2_3_1_0], [-0.0_3_2_8, -0.1_6_0_5, -0.2_7_1_4], [-0.1_4_0_8, -0.5_4_7_7, -0.6_9_7_6]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
_lowerCamelCase : Tuple = torch.tensor(
[
[
[-1.13_72e01, -1.27_87e01, -1.34_77e01],
[-1.25_36e01, -1.41_94e01, -1.44_09e01],
[-1.32_17e01, -1.48_88e01, -1.53_27e01],
],
[
[-1.47_91e01, -1.71_22e01, -1.82_77e01],
[-1.71_63e01, -1.91_92e01, -1.95_33e01],
[-1.78_97e01, -1.99_91e01, -2.03_15e01],
],
[
[7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
[4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
[3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
_lowerCamelCase : Union[str, Any] = torch.tensor(
[
[[-9.4_9_5_9, -1_1.3_0_8_7, -1_1.7_4_7_9], [-1_1.0_0_2_5, -1_2.6_5_4_0, -1_2.3_3_1_9], [-1_1.4_0_6_4, -1_3.0_4_8_7, -1_2.9_9_0_5]],
[[-9.8_9_0_5, -1_1.3_0_8_4, -1_2.0_8_5_4], [-1_1.1_7_2_6, -1_2.7_6_9_8, -1_2.9_5_8_3], [-1_1.5_9_8_5, -1_3.3_2_7_8, -1_4.1_7_7_4]],
[[0.2_2_1_3, 0.0_1_9_2, -0.2_4_6_6], [-0.1_7_3_1, -0.4_2_1_3, -0.4_8_7_4], [-0.3_1_2_6, -0.6_5_4_1, -1.1_3_8_9]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
_lowerCamelCase : List[Any] = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
_lowerCamelCase : Tuple = torch.tensor(
[
[[-1_6.0_9_7_6, -1_6.4_8_5_6, -1_7.3_9_6_2], [-1_6.6_2_3_4, -1_9.0_3_4_2, -1_9.7_6_8_5], [-1_6.0_9_0_0, -1_8.0_6_6_1, -1_9.1_1_8_0]],
[[-1_8.4_7_5_0, -1_8.8_4_8_8, -1_9.5_0_7_4], [-1_9.4_0_3_0, -2_2.1_5_7_0, -2_2.5_9_7_7], [-1_9.1_1_9_1, -2_0.8_4_8_6, -2_2.3_7_8_3]],
[[-4.5_1_7_8, -5.5_0_3_7, -6.5_1_0_9], [-5.0_8_8_4, -7.2_1_7_4, -8.0_3_3_4], [-4.4_1_5_6, -5.8_1_1_7, -7.2_9_7_0]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
_lowerCamelCase : Any = torch.tensor(
[
[[-1_4.2_0_8_1, -1_4.4_7_3_2, -1_4.1_9_7_7], [-1_4.5_8_6_7, -1_6.4_4_2_3, -1_6.6_3_5_6], [-1_3.4_4_4_1, -1_4.9_6_8_5, -1_6.8_6_9_6]],
[[-1_4.4_5_7_6, -1_4.7_0_7_3, -1_5.0_4_5_1], [-1_5.0_8_1_6, -1_7.6_2_3_7, -1_7.9_8_7_3], [-1_4.4_2_1_3, -1_6.0_1_9_9, -1_8.5_9_9_2]],
[[-4.7_3_4_9, -4.9_5_8_8, -5.0_9_6_6], [-4.3_2_1_0, -6.9_3_2_5, -7.2_5_9_1], [-3.4_3_1_2, -4.7_4_8_4, -7.1_9_1_7]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
_lowerCamelCase : List[str] = torch.tensor(
[
[[-1_1.7_7_3_7, -1_1.9_5_2_6, -1_1.3_2_7_3], [-1_3.6_6_9_2, -1_4.4_5_7_4, -1_3.8_8_7_8], [-1_3.8_9_3_7, -1_4.6_9_2_4, -1_5.9_3_4_5]],
[[-1_4.6_7_0_6, -1_4.5_3_3_0, -1_4.1_3_0_6], [-1_6.1_5_0_2, -1_6.8_1_8_0, -1_6.4_2_6_9], [-1_6.8_3_3_8, -1_7.8_9_3_9, -2_0.1_7_4_6]],
[[1.0_4_9_1, 0.8_2_8_9, 1.0_3_1_0], [1.1_0_4_4, 0.5_2_1_9, 0.8_0_5_5], [1.0_8_9_9, 0.6_9_2_6, 0.5_5_9_0]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
_lowerCamelCase : str = torch.tensor(
[
[[-1_2.5_6_4_1, -1_3.4_7_7_7, -1_3.0_6_8_4], [-1_3.9_5_8_7, -1_5.8_9_8_3, -1_6.6_5_5_7], [-1_3.3_1_0_9, -1_5.7_3_5_0, -1_6.3_1_4_1]],
[[-1_4.7_0_7_4, -1_5.4_3_5_2, -1_4.5_9_4_4], [-1_6.6_3_5_3, -1_8.1_6_6_3, -1_8.6_1_2_0], [-1_5.1_7_0_2, -1_8.0_3_2_9, -1_8.1_5_4_7]],
[[-1.7_9_9_0, -2.0_9_5_1, -1.7_7_8_4], [-2.6_3_9_7, -3.8_2_4_5, -3.9_6_8_6], [-1.5_2_6_4, -2.8_1_2_6, -2.9_3_1_6]],
] )
else:
_lowerCamelCase : Dict = logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-2 )
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
image_processor.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_lowerCAmelCase : str = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowerCAmelCase : str = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
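# Example invocation (added for illustration; the script file name and both paths
# are hypothetical, only the flags come from the parser above):
# python convert_segformer_original_to_pytorch.py \
#     --model_name segformer.b0.512x512.ade.160k \
#     --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#     --pytorch_dump_folder_path ./segformer-b0-converted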
| 340
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
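# Illustration (added): with the lazy import structure above, `import
# transformers.models.focalnet` stays cheap; torch-backed classes such as
# FocalNetModel are only materialized on first attribute access.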
| 315
|
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def __lowerCamelCase ( A__ , A__ , A__=1e-1_2 ) -> Dict:
"""simple docstring"""
UpperCamelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T
UpperCamelCase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(A__ , axis=1 ) , a_min=A__ ) ).T
return jnp.matmul(A__ , norm_emb_a.T )
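# Illustration (added): despite the "distance" wording at the call sites below,
# this helper L2-normalizes both embedding batches and returns their pairwise dot
# products, i.e. a cosine-similarity matrix of shape (num_images, num_concepts).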
class SCREAMING_SNAKE_CASE ( nn.Module ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = jnp.floataa
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = FlaxCLIPVisionModule(self.config.vision_config )
UpperCamelCase = nn.Dense(self.config.projection_dim , use_bias=UpperCamelCase__ , dtype=self.dtype )
UpperCamelCase = self.param('concept_embeds' , jax.nn.initializers.ones , (1_7, self.config.projection_dim) )
UpperCamelCase = self.param(
'special_care_embeds' , jax.nn.initializers.ones , (3, self.config.projection_dim) )
UpperCamelCase = self.param('concept_embeds_weights' , jax.nn.initializers.ones , (1_7,) )
UpperCamelCase = self.param('special_care_embeds_weights' , jax.nn.initializers.ones , (3,) )
def __call__( self : str , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = self.vision_model(UpperCamelCase__ )[1]
UpperCamelCase = self.visual_projection(UpperCamelCase__ )
UpperCamelCase = jax_cosine_distance(UpperCamelCase__ , self.special_care_embeds )
UpperCamelCase = jax_cosine_distance(UpperCamelCase__ , self.concept_embeds )
        # increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign image inputs
UpperCamelCase = 0.0
UpperCamelCase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
UpperCamelCase = jnp.round(UpperCamelCase__ , 3 )
UpperCamelCase = jnp.any(special_scores > 0 , axis=1 , keepdims=UpperCamelCase__ )
# Use a lower threshold if an image has any special care concept
UpperCamelCase = is_special_care * 0.0_1
UpperCamelCase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
UpperCamelCase = jnp.round(UpperCamelCase__ , 3 )
UpperCamelCase = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = CLIPConfig
_SCREAMING_SNAKE_CASE = """clip_input"""
_SCREAMING_SNAKE_CASE = FlaxStableDiffusionSafetyCheckerModule
def __init__( self : Union[str, Any] , UpperCamelCase__ : CLIPConfig , UpperCamelCase__ : Optional[Tuple] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : jnp.dtype = jnp.floataa , UpperCamelCase__ : bool = True , **UpperCamelCase__ : List[str] , ):
"""simple docstring"""
if input_shape is None:
UpperCamelCase = (1, 2_2_4, 2_2_4, 3)
UpperCamelCase = self.module_class(config=UpperCamelCase__ , dtype=UpperCamelCase__ , **UpperCamelCase__ )
super().__init__(UpperCamelCase__ , UpperCamelCase__ , input_shape=UpperCamelCase__ , seed=UpperCamelCase__ , dtype=UpperCamelCase__ , _do_init=_do_init )
def A ( self : int , UpperCamelCase__ : jax.random.KeyArray , UpperCamelCase__ : Tuple , UpperCamelCase__ : FrozenDict = None ):
"""simple docstring"""
UpperCamelCase = jax.random.normal(UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase , UpperCamelCase = jax.random.split(UpperCamelCase__ )
UpperCamelCase = {'params': params_rng, 'dropout': dropout_rng}
UpperCamelCase = self.module.init(UpperCamelCase__ , UpperCamelCase__ )['params']
return random_params
def __call__( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : dict = None , ):
"""simple docstring"""
UpperCamelCase = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) )
return self.module.apply(
{'params': params or self.params} , jnp.array(UpperCamelCase__ , dtype=jnp.floataa ) , rngs={} , )
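# Usage sketch (added; the class names above are masked in this dump): the checker
# is called with a batch of CLIP-preprocessed images in NCHW layout, transposes
# them to NHWC for Flax, and returns one boolean per image flagging NSFW content.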
| 28
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__a = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 363
|
from __future__ import annotations
from math import ceil, floor, sqrt
def __lowercase ( _UpperCamelCase = 2000000 ) ->int:
"""simple docstring"""
lowercase : list[int] = [0]
lowercase : int
for idx in range(1, ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
lowercase : int = 0
# the area corresponding to the grid that gives the product closest to target
lowercase : int = 0
# an estimate of b, using the quadratic formula
lowercase : float
# the largest integer less than b_estimate
lowercase : int
    # the smallest integer greater than b_estimate
lowercase : int
# the triangle number corresponding to b_floor
lowercase : int
# the triangle number corresponding to b_ceil
lowercase : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1 ):
lowercase : List[str] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
lowercase : str = floor(_UpperCamelCase )
lowercase : int = ceil(_UpperCamelCase )
lowercase : str = triangle_numbers[b_floor]
lowercase : str = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
lowercase : Optional[int] = triangle_b_first_guess * triangle_a
lowercase : Tuple = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
lowercase : Dict = triangle_b_second_guess * triangle_a
lowercase : Any = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F'''{solution() = }''')
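# Note (added): this solves Project Euler problem 85; with the default target of
# 2_000_000 rectangles, `solution()` is expected to return the grid area 2772.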
| 173
| 0
|
'''simple docstring'''
a__ : str ='''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
a__ : Tuple =[{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
a__ : Dict ={
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 53
|
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase__ ( __lowercase : Optional[int] , __lowercase : Tuple , __lowercase : Tuple ) -> Tuple:
"""simple docstring"""
return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def lowercase__ ( __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[str] , __lowercase : List[str]="attention" ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase = __UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
__UpperCamelCase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
__UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
__UpperCamelCase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
__UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
__UpperCamelCase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
__UpperCamelCase = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
__UpperCamelCase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def lowercase__ ( __lowercase : Tuple , __lowercase : Dict , __lowercase : int , __lowercase : List[Any]=False ) -> Optional[Any]:
"""simple docstring"""
if split_mlp_wi:
__UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
__UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
__UpperCamelCase = (wi_a, wi_a)
else:
__UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
__UpperCamelCase = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Optional[int] ) -> str:
"""simple docstring"""
return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def lowercase__ ( __lowercase : dict , *, __lowercase : int , __lowercase : bool , __lowercase : bool = False ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = traverse_util.flatten_dict(variables['target'] )
__UpperCamelCase = {'/'.join(__lowercase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__UpperCamelCase = 'encoder/encoder/mlp/wi_0/kernel' in old
print('Split MLP:' , __lowercase )
__UpperCamelCase = collections.OrderedDict()
# Shared embeddings.
__UpperCamelCase = old['token_embedder/embedding']
# Encoder.
for i in range(__lowercase ):
# Block i, layer 0 (Self Attention).
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_attention_layer_norm' )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'encoder' , 'attention' )
__UpperCamelCase = layer_norm
__UpperCamelCase = k.T
__UpperCamelCase = o.T
__UpperCamelCase = q.T
__UpperCamelCase = v.T
# Block i, layer 1 (MLP).
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'encoder' , 'pre_mlp_layer_norm' )
__UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'encoder' , __lowercase )
__UpperCamelCase = layer_norm
if split_mlp_wi:
__UpperCamelCase = wi[0].T
__UpperCamelCase = wi[1].T
else:
__UpperCamelCase = wi.T
__UpperCamelCase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__UpperCamelCase = tax_relpos_bias_lookup(
__lowercase , __lowercase , 'encoder' ).T
__UpperCamelCase = old['encoder/encoder_norm/scale']
if not scalable_attention:
__UpperCamelCase = tax_relpos_bias_lookup(
__lowercase , 0 , 'encoder' ).T
__UpperCamelCase = tax_relpos_bias_lookup(
__lowercase , 0 , 'decoder' ).T
if not is_encoder_only:
# Decoder.
for i in range(__lowercase ):
# Block i, layer 0 (Self Attention).
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_self_attention_layer_norm' )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'self_attention' )
__UpperCamelCase = layer_norm
__UpperCamelCase = k.T
__UpperCamelCase = o.T
__UpperCamelCase = q.T
__UpperCamelCase = v.T
# Block i, layer 1 (Cross Attention).
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_cross_attention_layer_norm' )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = tax_attention_lookup(__lowercase , __lowercase , 'decoder' , 'encoder_decoder_attention' )
__UpperCamelCase = layer_norm
__UpperCamelCase = k.T
__UpperCamelCase = o.T
__UpperCamelCase = q.T
__UpperCamelCase = v.T
# Block i, layer 2 (MLP).
__UpperCamelCase = tax_layer_norm_lookup(__lowercase , __lowercase , 'decoder' , 'pre_mlp_layer_norm' )
__UpperCamelCase , __UpperCamelCase = tax_mlp_lookup(__lowercase , __lowercase , 'decoder' , __lowercase )
__UpperCamelCase = layer_norm
if split_mlp_wi:
__UpperCamelCase = wi[0].T
__UpperCamelCase = wi[1].T
else:
__UpperCamelCase = wi.T
__UpperCamelCase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__UpperCamelCase = tax_relpos_bias_lookup(__lowercase , __lowercase , 'decoder' ).T
__UpperCamelCase = old['decoder/decoder_norm/scale']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__UpperCamelCase = old['decoder/logits_dense/kernel'].T
return new
def lowercase__ ( __lowercase : Optional[Any] , __lowercase : bool ) -> int:
"""simple docstring"""
__UpperCamelCase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__UpperCamelCase = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__UpperCamelCase = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
__UpperCamelCase = state_dict['shared.weight']
return state_dict
def lowercase__ ( __lowercase : List[str] , __lowercase : Dict , __lowercase : str , __lowercase : int , __lowercase : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__UpperCamelCase = checkpoints.load_tax_checkpoint(__lowercase )
__UpperCamelCase = convert_tax_to_pytorch(
__lowercase , num_layers=config.num_layers , is_encoder_only=__lowercase , scalable_attention=__lowercase )
__UpperCamelCase = make_state_dict(__lowercase , __lowercase )
model.load_state_dict(__lowercase , strict=__lowercase )
def lowercase__ ( __lowercase : Union[str, Any] , __lowercase : Dict , __lowercase : List[str] , __lowercase : bool = False , __lowercase : bool = False , ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase = MTaConfig.from_json_file(__lowercase )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__UpperCamelCase = UMTaEncoderModel(__lowercase )
else:
__UpperCamelCase = UMTaForConditionalGeneration(__lowercase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(__lowercase )
# Verify that we can load the checkpoint.
model.from_pretrained(__lowercase )
print('Done' )
if __name__ == "__main__":
a__ : List[Any] =argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
        '''--is_encoder_only''', action='''store_true''', help='''Whether the checkpoint is an encoder-only model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
a__ : List[str] =parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
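# Example invocation (added for illustration; the script file name and all paths
# are hypothetical, the flags come from the parser above):
# python convert_umt5_checkpoint_to_pytorch.py \
#     --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#     --config_file /path/to/config.json \
#     --pytorch_dump_path ./umt5-converted \
#     --scalable_attention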
| 53
| 1
|
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[Any] = int(SCREAMING_SNAKE_CASE )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Tuple = t // 3_600, (t // 60) % 60, t % 60
return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}"""
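# Worked example (added): format_time(3725) -> "1:02:05", format_time(125) -> "02:05".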
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=300 ):
'''simple docstring'''
return f"""
<div>
{prefix}
<progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
{label}
</div>
"""
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f""" <th>{i}</th>\n"""
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
__UpperCamelCase :List[Any] = f"""{elt:.6f}""" if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else str(SCREAMING_SNAKE_CASE )
html_code += f""" <td>{elt}</td>\n"""
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
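# Illustration (added): `items` is a list of rows whose first row holds the column
# headers, e.g. text_to_html_table([["Step", "Training Loss"], [500, 0.421337]])
# renders a two-column table; float cells are formatted to six decimal places.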
class lowerCamelCase_ :
'''simple docstring'''
a__ : Union[str, Any] = 5
a__ : int = 0.2
def __init__( self , __lowercase , __lowercase = None , __lowercase = True , __lowercase = None , __lowercase = 300 , ) -> Any:
__UpperCamelCase :Optional[int] = total
__UpperCamelCase :List[Any] = '''''' if prefix is None else prefix
__UpperCamelCase :Optional[Any] = leave
__UpperCamelCase :List[Any] = parent
__UpperCamelCase :List[Any] = width
__UpperCamelCase :int = None
__UpperCamelCase :Tuple = None
__UpperCamelCase :Union[str, Any] = None
def UpperCamelCase__ ( self , __lowercase , __lowercase = False , __lowercase = None) -> Tuple:
__UpperCamelCase :Union[str, Any] = value
if comment is not None:
__UpperCamelCase :Any = comment
if self.last_value is None:
__UpperCamelCase :Optional[int] = time.time()
__UpperCamelCase :Dict = value
__UpperCamelCase :Tuple = None
__UpperCamelCase :int = self.warmup
__UpperCamelCase :Optional[Any] = 1
self.update_bar(__lowercase)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total):
if self.first_calls > 0:
self.first_calls -= 1
__UpperCamelCase :Any = time.time()
__UpperCamelCase :Dict = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
__UpperCamelCase :Union[str, Any] = self.elapsed_time / (value - self.start_value)
else:
__UpperCamelCase :Union[str, Any] = None
if value >= self.total:
__UpperCamelCase :Dict = self.total
__UpperCamelCase :int = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__UpperCamelCase :Dict = self.average_time_per_item * (self.total - value)
self.update_bar(__lowercase)
__UpperCamelCase :Optional[int] = value
__UpperCamelCase :Tuple = current_time
if self.average_time_per_item is None:
__UpperCamelCase :Dict = 1
else:
__UpperCamelCase :Optional[Any] = max(int(self.update_every / self.average_time_per_item) , 1)
def UpperCamelCase__ ( self , __lowercase , __lowercase=None) -> Optional[int]:
__UpperCamelCase :Optional[int] = ''' ''' * (len(str(self.total)) - len(str(__lowercase))) + str(__lowercase)
if self.elapsed_time is None:
__UpperCamelCase :Optional[int] = f"""[{spaced_value}/{self.total} : < :"""
elif self.predicted_remaining is None:
__UpperCamelCase :Any = f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"""
else:
__UpperCamelCase :Tuple = (
f"""[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"""
f""" {format_time(self.predicted_remaining)}"""
)
self.label += f""", {1/self.average_time_per_item:.2f} it/s"""
self.label += "]" if self.comment is None or len(self.comment) == 0 else f""", {self.comment}]"""
self.display()
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Union[str, Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__UpperCamelCase :Union[str, Any] = disp.display(disp.HTML(self.html_code) , display_id=__lowercase)
else:
self.output.update(disp.HTML(self.html_code))
def UpperCamelCase__ ( self) -> Any:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(''''''))
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=None) -> List[Any]:
super().__init__(__lowercase)
__UpperCamelCase :Dict = None if column_names is None else [column_names]
__UpperCamelCase :Union[str, Any] = None
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :List[Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__UpperCamelCase :Dict = disp.display(disp.HTML(self.html_code) , display_id=__lowercase)
else:
self.output.update(disp.HTML(self.html_code))
def UpperCamelCase__ ( self , __lowercase) -> Optional[int]:
if self.inner_table is None:
__UpperCamelCase :Optional[int] = [list(values.keys()), list(values.values())]
else:
__UpperCamelCase :Optional[int] = self.inner_table[0]
if len(self.inner_table) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(__lowercase)
__UpperCamelCase :Tuple = columns
self.inner_table.append([values[c] for c in columns])
def UpperCamelCase__ ( self , __lowercase , __lowercase=None , __lowercase=300) -> str:
__UpperCamelCase :List[str] = NotebookProgressBar(__lowercase , prefix=__lowercase , parent=self , width=__lowercase)
return self.child_bar
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :str = None
self.display()
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self) -> Union[str, Any]:
__UpperCamelCase :int = None
__UpperCamelCase :List[Any] = None
__UpperCamelCase :Optional[Any] = False
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , **__lowercase) -> Union[str, Any]:
__UpperCamelCase :Any = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__UpperCamelCase :Any = 0
__UpperCamelCase :Optional[int] = 0
__UpperCamelCase :Optional[Any] = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''')
__UpperCamelCase :Dict = NotebookTrainingTracker(state.max_steps , __lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , **__lowercase) -> Dict:
__UpperCamelCase :Optional[int] = int(state.epoch) if int(state.epoch) == state.epoch else f"""{state.epoch:.2f}"""
self.training_tracker.update(
state.global_step + 1 , comment=f"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , )
__UpperCamelCase :Any = False
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase=None , **__lowercase) -> Any:
if not has_length(__lowercase):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__UpperCamelCase :Dict = self.training_tracker.add_child(len(__lowercase))
else:
__UpperCamelCase :Tuple = NotebookProgressBar(len(__lowercase))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , **__lowercase) -> List[Any]:
if self.prediction_bar is not None:
self.prediction_bar.close()
__UpperCamelCase :List[Any] = None
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase=None , **__lowercase) -> str:
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__UpperCamelCase :List[Any] = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
__UpperCamelCase :Tuple = state.global_step
self.training_tracker.write_line(__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase=None , **__lowercase) -> List[Any]:
if self.training_tracker is not None:
__UpperCamelCase :int = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history):
if "loss" in log:
__UpperCamelCase :Any = log['''loss''']
break
if self.first_column == "Epoch":
__UpperCamelCase :Tuple = int(state.epoch)
else:
__UpperCamelCase :Optional[int] = state.global_step
__UpperCamelCase :List[Any] = '''eval'''
for k in metrics:
if k.endswith('''_loss'''):
__UpperCamelCase :Union[str, Any] = re.sub(r'''\_loss$''' , '''''' , __lowercase)
__UpperCamelCase :List[str] = metrics.pop('''total_flos''' , __lowercase)
__UpperCamelCase :Any = metrics.pop('''epoch''' , __lowercase)
__UpperCamelCase :List[Any] = metrics.pop(f"""{metric_key_prefix}_runtime""" , __lowercase)
__UpperCamelCase :Optional[int] = metrics.pop(f"""{metric_key_prefix}_samples_per_second""" , __lowercase)
__UpperCamelCase :Any = metrics.pop(f"""{metric_key_prefix}_steps_per_second""" , __lowercase)
__UpperCamelCase :Union[str, Any] = metrics.pop(f"""{metric_key_prefix}_jit_compilation_time""" , __lowercase)
for k, v in metrics.items():
if k == f"""{metric_key_prefix}_loss""":
__UpperCamelCase :Tuple = v
else:
__UpperCamelCase :List[Any] = k.split('''_''')
__UpperCamelCase :Optional[Any] = ''' '''.join([part.capitalize() for part in splits[1:]])
__UpperCamelCase :str = v
self.training_tracker.write_line(__lowercase)
self.training_tracker.remove_child()
__UpperCamelCase :str = None
# Evaluation takes a long time so we should force the next update.
__UpperCamelCase :Optional[Any] = True
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , **__lowercase) -> int:
self.training_tracker.update(
state.global_step , comment=f"""Epoch {int(state.epoch)}/{state.num_train_epochs}""" , force_update=__lowercase)
__UpperCamelCase :Dict = None
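# Usage sketch (added; the class names above are masked in this dump): in
# transformers this module backs NotebookProgressCallback, which the Trainer
# enables automatically inside Jupyter so that training and evaluation render
# as live HTML progress bars and a metrics table instead of tqdm output.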
| 105
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :int = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=SCREAMING_SNAKE_CASE , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=SCREAMING_SNAKE_CASE , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=SCREAMING_SNAKE_CASE )
return parser.parse_args()
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :Tuple = parse_args()
# Import training_script as a module.
__UpperCamelCase :Dict = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__UpperCamelCase :Any = script_fpath.stem
__UpperCamelCase :Union[str, Any] = importlib.import_module(SCREAMING_SNAKE_CASE )
# Patch sys.argv
__UpperCamelCase :Dict = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
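# Example invocation (added for illustration; the launcher and training-script
# names are hypothetical, the flags match the parser above):
# python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...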
| 105
| 1
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowerCAmelCase__ :Union[str, Any] = TypeVar('''T''')
def lowerCAmelCase__ ( a__: int ) -> int:
'''simple docstring'''
return (position - 1) // 2
def lowerCAmelCase__ ( a__: int ) -> int:
'''simple docstring'''
return (2 * position) + 1
def lowerCAmelCase__ ( a__: int ) -> int:
'''simple docstring'''
return (2 * position) + 2
class __a ( Generic[T] ):
def __init__( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = {}
_UpperCAmelCase = 0
def __len__( self ) -> int:
"""simple docstring"""
return self.elements
def __repr__( self ) -> str:
"""simple docstring"""
return str(self.heap )
def UpperCAmelCase__ ( self ) -> bool:
"""simple docstring"""
return self.elements == 0
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
self.heap.append((elem, weight) )
_UpperCAmelCase = self.elements
self.elements += 1
self._bubble_up(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self ) -> T:
"""simple docstring"""
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
_UpperCAmelCase , _UpperCAmelCase = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
_UpperCAmelCase , _UpperCAmelCase = self.heap[0]
self._bubble_down(_SCREAMING_SNAKE_CASE )
return elem
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
_UpperCAmelCase = self.position_map[elem]
_UpperCAmelCase = (elem, weight)
if position > 0:
_UpperCAmelCase = get_parent_position(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_SCREAMING_SNAKE_CASE )
else:
self._bubble_down(_SCREAMING_SNAKE_CASE )
else:
self._bubble_down(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
_UpperCAmelCase = self.position_map[elem]
if curr_pos == 0:
return None
_UpperCAmelCase = get_parent_position(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = self.heap[curr_pos]
_UpperCAmelCase , _UpperCAmelCase = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._bubble_up(_SCREAMING_SNAKE_CASE )
return None
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
_UpperCAmelCase = self.position_map[elem]
_UpperCAmelCase , _UpperCAmelCase = self.heap[curr_pos]
_UpperCAmelCase = get_child_left_position(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = get_child_right_position(_SCREAMING_SNAKE_CASE )
if child_left_position < self.elements and child_right_position < self.elements:
_UpperCAmelCase , _UpperCAmelCase = self.heap[child_left_position]
_UpperCAmelCase , _UpperCAmelCase = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._bubble_down(_SCREAMING_SNAKE_CASE )
if child_left_position < self.elements:
_UpperCAmelCase , _UpperCAmelCase = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._bubble_down(_SCREAMING_SNAKE_CASE )
else:
return None
if child_right_position < self.elements:
_UpperCAmelCase , _UpperCAmelCase = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return self._bubble_down(_SCREAMING_SNAKE_CASE )
return None
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
_UpperCAmelCase = self.heap[nodea_pos][0]
_UpperCAmelCase = self.heap[nodea_pos][0]
_UpperCAmelCase , _UpperCAmelCase = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
_UpperCAmelCase = nodea_pos
_UpperCAmelCase = nodea_pos
class __a ( Generic[T] ):
def __init__( self ) -> None:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = 0
def __repr__( self ) -> str:
"""simple docstring"""
return str(self.connections )
def __len__( self ) -> int:
"""simple docstring"""
return self.nodes
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
if node not in self.connections:
_UpperCAmelCase = {}
self.nodes += 1
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None:
"""simple docstring"""
self.add_node(_SCREAMING_SNAKE_CASE )
self.add_node(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = weight
_UpperCAmelCase = weight
def lowerCAmelCase__ ( a__: GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
'''simple docstring'''
_UpperCAmelCase = {node: maxsize for node in graph.connections}
_UpperCAmelCase = {node: None for node in graph.connections}
_UpperCAmelCase = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(a__ , a__ )
if priority_queue.is_empty():
return dist, parent
# initialization
_UpperCAmelCase = priority_queue.extract_min()
_UpperCAmelCase = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_UpperCAmelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(a__ , dist[neighbour] )
_UpperCAmelCase = node
# running prim's algorithm
while not priority_queue.is_empty():
_UpperCAmelCase = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
_UpperCAmelCase = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(a__ , dist[neighbour] )
_UpperCAmelCase = node
return dist, parent
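# A minimal, self-contained sketch of the same technique (added; it deliberately
# avoids the masked class names above): Prim's algorithm over an adjacency-dict
# graph, using the standard-library heap as the priority queue.
import heapq


def prim_mst_weight(adj: dict[int, dict[int, int]], start: int) -> int:
    """Return the total weight of a minimum spanning tree rooted at `start`."""
    seen = {start}
    frontier = [(weight, node) for node, weight in adj[start].items()]
    heapq.heapify(frontier)
    total = 0
    while frontier and len(seen) < len(adj):
        weight, node = heapq.heappop(frontier)
        if node in seen:
            continue  # a cheaper edge already reached this node
        seen.add(node)
        total += weight
        for neighbour, w in adj[node].items():
            if neighbour not in seen:
                heapq.heappush(frontier, (w, neighbour))
    return total


# e.g. prim_mst_weight({1: {2: 3, 3: 1}, 2: {1: 3, 3: 7}, 3: {1: 1, 2: 7}}, 1) == 4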
| 329
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowerCAmelCase__ :Optional[int] = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowerCAmelCase__ ( a__: Tuple , a__: Optional[int]=None ) -> Any:
'''simple docstring'''
require_version(deps[pkg] , a__ )
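# Illustration (added): the loop above enforces the pins from
# dependency_versions_table.py at import time, so e.g. a pin like "tqdm>=4.27"
# makes require_version_core raise if an older tqdm is installed; the helper
# just above performs the same check on demand for a single package.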
| 329
| 1
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase, __lowerCamelCase )
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = emb.weight.shape
SCREAMING_SNAKE_CASE_ = nn.Linear(__lowerCamelCase, __lowerCamelCase, bias=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = emb.weight.data
return lin_layer
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = torch.load(__lowerCamelCase, map_location='''cpu''' )
SCREAMING_SNAKE_CASE_ = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
SCREAMING_SNAKE_CASE_ = mam_aaa['''model''']
remove_ignore_keys_(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = state_dict['''encoder.embed_tokens.weight'''].shape[0]
SCREAMING_SNAKE_CASE_ = MaMaaaConfig(
vocab_size=__lowerCamelCase, max_position_embeddings=10_24, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', )
SCREAMING_SNAKE_CASE_ = state_dict['''decoder.embed_tokens.weight''']
SCREAMING_SNAKE_CASE_ = MaMaaaForConditionalGeneration(__lowerCamelCase )
model.model.load_state_dict(__lowerCamelCase, strict=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
__UpperCAmelCase = parser.parse_args()
    __UpperCAmelCase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
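# Example invocation (added for illustration; the script file name and checkpoint
# path are hypothetical, the two positional arguments come from the parser above):
# python convert_m2m100_original_checkpoint_to_pytorch.py /path/to/m2m100/model.pt ./m2m100-converted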
| 257
|
def A__ ( __lowerCamelCase ):
return sum(i for i in range(1, number // 2 + 1 ) if number % i == 0 ) == number
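# Worked example (added): 28 is perfect because its proper divisors
# 1 + 2 + 4 + 7 + 14 sum to 28, while 12 is not (1 + 2 + 3 + 4 + 6 == 16).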
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
__UpperCAmelCase = int(input("Enter number: ").strip())
print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 257
| 1
|