code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
return 1 if input_a == input_a else 0
def UpperCamelCase__ ( ):
'''simple docstring'''
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 717
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =[0]
_lowerCAmelCase =[0]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
_lowerCAmelCase =[60]
_lowerCAmelCase =[10]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =3
_lowerCAmelCase =[1, 2, 3]
_lowerCAmelCase =[3, 2, 1]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 5 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =50
_lowerCAmelCase =[60, 100, 120]
_lowerCAmelCase =[10, 20, 30]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 220 )
if __name__ == "__main__":
unittest.main()
| 58
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
lowercase_ = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 718
|
'''simple docstring'''
lowercase_ = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58
| 0
|
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = '''AutoTokenizer'''
lowercase : List[str] = ['''tokenizer''']
lowercase : str = {
'''semantic_prompt''': 1,
'''coarse_prompt''': 2,
'''fine_prompt''': 2,
}
def __init__( self , __A , __A=None ) -> Any:
super().__init__(A__ )
_lowerCAmelCase =speaker_embeddings
@classmethod
def UpperCamelCase__ ( cls , __A , __A="speaker_embeddings_path.json" , **__A ) -> List[str]:
if speaker_embeddings_dict_path is not None:
_lowerCAmelCase =get_file_from_repo(
A__ , A__ , subfolder=kwargs.pop('subfolder' , A__ ) , cache_dir=kwargs.pop('cache_dir' , A__ ) , force_download=kwargs.pop('force_download' , A__ ) , proxies=kwargs.pop('proxies' , A__ ) , resume_download=kwargs.pop('resume_download' , A__ ) , local_files_only=kwargs.pop('local_files_only' , A__ ) , use_auth_token=kwargs.pop('use_auth_token' , A__ ) , revision=kwargs.pop('revision' , A__ ) , )
if speaker_embeddings_path is None:
logger.warning(
F'''`{os.path.join(A__ , A__ )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
_lowerCAmelCase =None
else:
with open(A__ ) as speaker_embeddings_json:
_lowerCAmelCase =json.load(A__ )
else:
_lowerCAmelCase =None
_lowerCAmelCase =AutoTokenizer.from_pretrained(A__ , **A__ )
return cls(tokenizer=A__ , speaker_embeddings=A__ )
def UpperCamelCase__ ( self , __A , __A="speaker_embeddings_path.json" , __A="speaker_embeddings" , __A = False , **__A , ) -> Dict:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(A__ , A__ , 'v2' ) , exist_ok=A__ )
_lowerCAmelCase ={}
_lowerCAmelCase =save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_lowerCAmelCase =self._load_voice_preset(A__ )
_lowerCAmelCase ={}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , A__ , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=A__ , )
_lowerCAmelCase =os.path.join(A__ , F'''{prompt_key}_{key}.npy''' )
_lowerCAmelCase =tmp_dict
with open(os.path.join(A__ , A__ ) , 'w' ) as fp:
json.dump(A__ , A__ )
super().save_pretrained(A__ , A__ , **A__ )
def UpperCamelCase__ ( self , __A = None , **__A ) -> int:
_lowerCAmelCase =self.speaker_embeddings[voice_preset]
_lowerCAmelCase ={}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
_lowerCAmelCase =get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , A__ ) , cache_dir=kwargs.pop('cache_dir' , A__ ) , force_download=kwargs.pop('force_download' , A__ ) , proxies=kwargs.pop('proxies' , A__ ) , resume_download=kwargs.pop('resume_download' , A__ ) , local_files_only=kwargs.pop('local_files_only' , A__ ) , use_auth_token=kwargs.pop('use_auth_token' , A__ ) , revision=kwargs.pop('revision' , A__ ) , )
if path is None:
raise ValueError(
F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
_lowerCAmelCase =np.load(A__ )
return voice_preset_dict
def UpperCamelCase__ ( self , __A = None ) -> Optional[int]:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self , __A=None , __A=None , __A="pt" , __A=256 , __A=False , __A=True , __A=False , **__A , ) -> Tuple:
if voice_preset is not None and not isinstance(A__ , A__ ):
if (
isinstance(A__ , A__ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_lowerCAmelCase =self._load_voice_preset(A__ )
else:
if isinstance(A__ , A__ ) and not voice_preset.endswith('.npz' ):
_lowerCAmelCase =voice_preset + """.npz"""
_lowerCAmelCase =np.load(A__ )
if voice_preset is not None:
self._validate_voice_preset_dict(A__ , **A__ )
_lowerCAmelCase =BatchFeature(data=A__ , tensor_type=A__ )
_lowerCAmelCase =self.tokenizer(
A__ , return_tensors=A__ , padding='max_length' , max_length=A__ , return_attention_mask=A__ , return_token_type_ids=A__ , add_special_tokens=A__ , **A__ , )
if voice_preset is not None:
_lowerCAmelCase =voice_preset
return encoded_text
| 719
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase_ = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 58
| 0
|
'''simple docstring'''
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
lowercase_ = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : str
lowercase : Optional[str] = None
lowercase : Optional[Union[str, int]] = None
lowercase : Optional[Union[str, int]] = None
lowercase : Optional[Union[str, int]] = None
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =_str_to_version_tuple(self.version_str )
def __repr__( self ) -> Tuple:
return F'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def UpperCamelCase__ ( self ) -> List[str]:
return self.major, self.minor, self.patch
def UpperCamelCase__ ( self , __A ) -> List[str]:
if isinstance(_a , _a ):
return Version(_a )
elif isinstance(_a , _a ):
return other
raise TypeError(F'''{other} (type {type(_a )}) cannot be compared to version.''' )
def __eq__( self , __A ) -> Optional[Any]:
try:
_lowerCAmelCase =self._validate_operand(_a )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , __A ) -> List[Any]:
_lowerCAmelCase =self._validate_operand(_a )
return self.tuple < other.tuple
def __hash__( self ) -> int:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def UpperCamelCase__ ( cls , __A ) -> Union[str, Any]:
_lowerCAmelCase ={f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def UpperCamelCase__ ( self ) -> str:
return self.version_str
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =_VERSION_REG.match(lowerCAmelCase__ )
if not res:
raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(lowerCAmelCase__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return ".".join(str(lowerCAmelCase__ ) for v in version_tuple )
| 720
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'sequence-classification'
def __init__( self , __A ) -> List[Any]:
if type(__A ) == dict:
_lowerCAmelCase =Namespace(**__A )
_lowerCAmelCase =glue_output_modes[hparams.task]
_lowerCAmelCase =glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def UpperCamelCase__ ( self , **__A ) -> Any:
return self.model(**__A )
def UpperCamelCase__ ( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase =outputs[0]
_lowerCAmelCase =self.trainer.lr_schedulers[0]['scheduler']
_lowerCAmelCase ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.hparams
_lowerCAmelCase =processors[args.task]()
_lowerCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
_lowerCAmelCase =self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , __A )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_lowerCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_lowerCAmelCase =convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , __A )
torch.save(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A = False ) -> DataLoader:
_lowerCAmelCase ='dev' if mode == 'test' else mode
_lowerCAmelCase =self._feature_file(__A )
logger.info('Loading features from cached file %s' , __A )
_lowerCAmelCase =torch.load(__A )
_lowerCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def UpperCamelCase__ ( self , __A , __A ) -> List[str]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase , _lowerCAmelCase =outputs[:2]
_lowerCAmelCase =logits.detach().cpu().numpy()
_lowerCAmelCase =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self , __A ) -> tuple:
_lowerCAmelCase =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_lowerCAmelCase =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =np.squeeze(__A )
_lowerCAmelCase =np.concatenate([x['target'] for x in outputs] , axis=0 )
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_lowerCAmelCase =dict(results.items() )
_lowerCAmelCase =results
return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( __A , __A ) -> Any:
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'--max_seq_length' , default=128 , type=__A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=__A , required=__A , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=__A , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =argparse.ArgumentParser()
add_generic_args(a__ , os.getcwd() )
_lowerCAmelCase =GLUETransformer.add_model_specific_args(a__ , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_lowerCAmelCase =os.path.join(
'./results' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_lowerCAmelCase =GLUETransformer(a__ )
_lowerCAmelCase =generic_train(a__ , a__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_lowerCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=a__ ) )
_lowerCAmelCase =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(a__ )
if __name__ == "__main__":
main()
| 58
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase_ = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 721
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A ) -> None:
_lowerCAmelCase =num_of_nodes
_lowerCAmelCase =[]
_lowerCAmelCase ={}
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def UpperCamelCase__ ( self , __A ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCamelCase__ ( self , __A ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowerCAmelCase =self.find_component(__A )
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
if component_size[u_node] <= component_size[v_node]:
_lowerCAmelCase =v_node
component_size[v_node] += component_size[u_node]
self.set_component(__A )
elif component_size[u_node] >= component_size[v_node]:
_lowerCAmelCase =self.find_component(__A )
component_size[u_node] += component_size[v_node]
self.set_component(__A )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =[]
_lowerCAmelCase =0
_lowerCAmelCase =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowerCAmelCase =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowerCAmelCase =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(__A , __A ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__A , __A , __A )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
_lowerCAmelCase =[-1] * self.m_num_of_nodes
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
lowercase_ = [
"""kernels/rwkv/wkv_cuda.cu""",
"""kernels/rwkv/wkv_op.cpp""",
"""kernels/deformable_detr/ms_deform_attn.h""",
"""kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh""",
"""models/graphormer/algos_graphormer.pyx""",
]
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
lowercase_ = parser.parse_args()
if args.check_lib:
lowercase_ = importlib.import_module('''transformers''')
lowercase_ = Path(transformers_module.__file__).parent
else:
lowercase_ = Path.cwd() / """build/lib/transformers"""
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
| 700
|
'''simple docstring'''
from PIL import Image
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
def brightness(a__ ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(a__ )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
| 58
| 0
|
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
lowercase_ = logging.getLogger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE ( _UpperCAmelCase):
"""simple docstring"""
lowercase : int = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'})
lowercase : Tuple = field(default=_UpperCAmelCase , metadata={'help': 'Whether to SortishSamler or not.'})
lowercase : Optional[Any] = field(
default=_UpperCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
lowercase : List[str] = field(default=_UpperCAmelCase , metadata={'help': 'whether to use adafactor'})
lowercase : List[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'})
lowercase : Dict = field(
default=_UpperCAmelCase , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'})
lowercase : Optional[int] = field(default=_UpperCAmelCase , metadata={'help': 'Dropout probability. Goes into model.config.'})
lowercase : Any = field(
default=_UpperCAmelCase , metadata={'help': 'Attention dropout probability. Goes into model.config.'})
lowercase : Dict = field(
default='linear' , metadata={'help': f"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"""} , )
| 701
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A , repo_id='test-config' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__A , repo_id='valid_org/test-config-org' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> List[str]:
CustomConfig.register_for_auto_class()
_lowerCAmelCase =CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
_lowerCAmelCase =AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCAmelCase =c.n_embd + 1 # int
_lowerCAmelCase =c.resid_pdrop + 1.0 # float
_lowerCAmelCase =not c.scale_attn_weights # bool
_lowerCAmelCase =c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__A , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__A , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__A , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__A , c.summary_type , 'mismatch for key: summary_type' )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =PretrainedConfig()
_lowerCAmelCase =[key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
__A , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_lowerCAmelCase =[key for key, value in config_common_kwargs.items() if value == getattr(__A , __A )]
if len(__A ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(__A )}.''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase =mock.Mock()
_lowerCAmelCase =500
_lowerCAmelCase ={}
_lowerCAmelCase =HTTPError
_lowerCAmelCase ={}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__A ) as mock_head:
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =AutoConfig.from_pretrained('bert-base-cased' )
_lowerCAmelCase =['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__A )
_lowerCAmelCase =2
json.dump(configuration.to_dict() , open(os.path.join(__A , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase =['config.42.0.0.json']
_lowerCAmelCase =768
configuration.save_pretrained(__A )
shutil.move(os.path.join(__A , 'config.4.0.0.json' ) , os.path.join(__A , 'config.42.0.0.json' ) )
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase ='hf-internal-testing/test-two-configs'
import transformers as new_transformers
_lowerCAmelCase ='v4.0.0'
_lowerCAmelCase , _lowerCAmelCase =new_transformers.models.auto.AutoConfig.from_pretrained(
__A , return_unused_kwargs=__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(__A , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_lowerCAmelCase ='v3.0.0'
_lowerCAmelCase =old_transformers.models.auto.AutoConfig.from_pretrained(__A )
self.assertEqual(old_configuration.hidden_size , 768 )
| 58
| 0
|
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Tuple = (KDPMaDiscreteScheduler,)
lowercase : Optional[Any] = 10
def UpperCamelCase__ ( self , **__A ) -> Dict:
_lowerCAmelCase ={
'num_train_timesteps': 1100,
'beta_start': 0.0_001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**__A )
return config
def UpperCamelCase__ ( self ) -> str:
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__A , beta_end=__A )
def UpperCamelCase__ ( self ) -> List[Any]:
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__A )
def UpperCamelCase__ ( self ) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__A )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.scheduler_classes[0]
_lowerCAmelCase =self.get_scheduler_config(prediction_type='v_prediction' )
_lowerCAmelCase =scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase =self.dummy_model()
_lowerCAmelCase =self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase =sample.to(__A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase =scheduler.scale_model_input(__A , __A )
_lowerCAmelCase =model(__A , __A )
_lowerCAmelCase =scheduler.step(__A , __A , __A )
_lowerCAmelCase =output.prev_sample
_lowerCAmelCase =torch.sum(torch.abs(__A ) )
_lowerCAmelCase =torch.mean(torch.abs(__A ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34E-07 ) < 1E-2
assert abs(result_mean.item() - 6.11_12E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0_002 ) < 1E-3
def UpperCamelCase__ ( self ) -> List[str]:
if torch_device == "mps":
return
_lowerCAmelCase =self.scheduler_classes[0]
_lowerCAmelCase =self.get_scheduler_config()
_lowerCAmelCase =scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase =self.dummy_model()
_lowerCAmelCase =self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase =sample.to(__A )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase =scheduler.scale_model_input(__A , __A )
_lowerCAmelCase =model(__A , __A )
_lowerCAmelCase =scheduler.step(__A , __A , __A )
_lowerCAmelCase =output.prev_sample
_lowerCAmelCase =torch.sum(torch.abs(__A ) )
_lowerCAmelCase =torch.mean(torch.abs(__A ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
def UpperCamelCase__ ( self ) -> Any:
if torch_device == "mps":
return
_lowerCAmelCase =self.scheduler_classes[0]
_lowerCAmelCase =self.get_scheduler_config()
_lowerCAmelCase =scheduler_class(**__A )
scheduler.set_timesteps(self.num_inference_steps , device=__A )
_lowerCAmelCase =self.dummy_model()
_lowerCAmelCase =self.dummy_sample_deter.to(__A ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCAmelCase =scheduler.scale_model_input(__A , __A )
_lowerCAmelCase =model(__A , __A )
_lowerCAmelCase =scheduler.step(__A , __A , __A )
_lowerCAmelCase =output.prev_sample
_lowerCAmelCase =torch.sum(torch.abs(__A ) )
_lowerCAmelCase =torch.mean(torch.abs(__A ) )
if str(__A ).startswith('cpu' ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0_266 ) < 1E-3
| 702
|
'''simple docstring'''
from __future__ import annotations
lowercase_ = 10
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =1
_lowerCAmelCase =max(a__ )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase =[[] for _ in range(a__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase =int((i / placement) % RADIX )
buckets[tmp].append(a__ )
# put each buckets' contents into list_of_ints
_lowerCAmelCase =0
for b in range(a__ ):
for i in buckets[b]:
_lowerCAmelCase =i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE ( __A):
"""simple docstring"""
pass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A ) -> Union[str, Any]:
_lowerCAmelCase =data
_lowerCAmelCase =None
def __iter__( self ) -> Any:
_lowerCAmelCase =self
_lowerCAmelCase =[]
while node:
if node in visited:
raise ContainsLoopError
visited.append(__A )
yield node.data
_lowerCAmelCase =node.next_node
@property
def UpperCamelCase__ ( self ) -> Union[str, Any]:
try:
list(self )
return False
except ContainsLoopError:
return True
if __name__ == "__main__":
lowercase_ = Node(1)
lowercase_ = Node(2)
lowercase_ = Node(3)
lowercase_ = Node(4)
print(root_node.has_loop) # False
lowercase_ = root_node.next_node
print(root_node.has_loop) # True
lowercase_ = Node(5)
lowercase_ = Node(6)
lowercase_ = Node(5)
lowercase_ = Node(6)
print(root_node.has_loop) # False
lowercase_ = Node(1)
print(root_node.has_loop) # False
| 703
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58
| 0
|
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def UpperCamelCase__ ( a__ ): # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def UpperCamelCase__ ( ):
'''simple docstring'''
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
_lowerCAmelCase =[1, 2, 3]
with pytest.raises(__snake_case ):
with parallel_backend('unsupported backend' ):
map_nested(__snake_case , __snake_case , num_proc=2 )
with pytest.raises(__snake_case ):
with parallel_backend('unsupported backend' ):
map_nested(__snake_case , __snake_case , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =[1, 2]
_lowerCAmelCase ={'a': 1, 'b': 2}
_lowerCAmelCase ={'a': [1, 2], 'b': [3, 4]}
_lowerCAmelCase ={'a': {'1': 1}, 'b': 2}
_lowerCAmelCase ={'a': 1, 'b': 2, 'c': 3, 'd': 4}
_lowerCAmelCase =[2, 3]
_lowerCAmelCase ={'a': 2, 'b': 3}
_lowerCAmelCase ={'a': [2, 3], 'b': [4, 5]}
_lowerCAmelCase ={'a': {'1': 2}, 'b': 3}
_lowerCAmelCase ={'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
assert map_nested(__snake_case , __snake_case , num_proc=__snake_case ) == expected_map_nested_sa
| 704
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =len(a__ ) // 2
# choose the middle 3 elements
_lowerCAmelCase =lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE ( __a , unittest.TestCase):
"""simple docstring"""
lowercase : Union[str, Any] = CTRLTokenizer
lowercase : int = False
lowercase : List[str] = False
def UpperCamelCase__ ( self ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase =['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
_lowerCAmelCase =dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
_lowerCAmelCase =['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
_lowerCAmelCase ={'unk_token': '<unk>'}
_lowerCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def UpperCamelCase__ ( self , **__A ) -> Tuple:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def UpperCamelCase__ ( self , __A ) -> str:
_lowerCAmelCase ='adapt react readapt apt'
_lowerCAmelCase ='adapt react readapt apt'
return input_text, output_text
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase ='adapt react readapt apt'
_lowerCAmelCase ='adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
_lowerCAmelCase =tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
_lowerCAmelCase =tokens + [tokenizer.unk_token]
_lowerCAmelCase =[0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ )
| 705
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ConvBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A="[UNK]" , __A="[SEP]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __A ) != do_lower_case
or normalizer_state.get('strip_accents' , __A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __A ) != tokenize_chinese_chars
):
_lowerCAmelCase =getattr(__A , normalizer_state.pop('type' ) )
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =strip_accents
_lowerCAmelCase =tokenize_chinese_chars
_lowerCAmelCase =normalizer_class(**__A )
_lowerCAmelCase =do_lower_case
def UpperCamelCase__ ( self , __A , __A=None ) -> int:
_lowerCAmelCase =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
| 58
| 0
|
'''simple docstring'''
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> Any:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(a_ ):
_lowerCAmelCase =AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
_lowerCAmelCase =FlaxAutoModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(a_ ):
_lowerCAmelCase =AutoConfig.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
_lowerCAmelCase =FlaxAutoModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
self.assertIsInstance(a_ , a_ )
@slow
def UpperCamelCase__ ( self ) -> Dict:
for model_name in ["bert-base-cased", "bert-large-uncased"]:
_lowerCAmelCase =AutoTokenizer.from_pretrained(a_ )
_lowerCAmelCase =FlaxBertModel.from_pretrained(a_ )
_lowerCAmelCase =tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__A ):
return model(**a_ )
eval(**a_ ).block_until_ready()
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
for model_name in ["roberta-base", "roberta-large"]:
_lowerCAmelCase =AutoTokenizer.from_pretrained(a_ )
_lowerCAmelCase =FlaxRobertaModel.from_pretrained(a_ )
_lowerCAmelCase =tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**__A ):
return model(**a_ )
eval(**a_ ).block_until_ready()
def UpperCamelCase__ ( self ) -> Tuple:
with self.assertRaisesRegex(
a_ , 'bert-base is not a local folder and is not a valid model identifier' ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained('bert-base' )
def UpperCamelCase__ ( self ) -> Tuple:
with self.assertRaisesRegex(
a_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained(a_ , revision='aaaaaa' )
def UpperCamelCase__ ( self ) -> str:
with self.assertRaisesRegex(
a_ , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def UpperCamelCase__ ( self ) -> Tuple:
with self.assertRaisesRegex(a_ , 'Use `from_pt=True` to load this model' ):
_lowerCAmelCase =FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
| 706
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = ['image_processor', 'tokenizer']
lowercase : Any = 'CLIPImageProcessor'
lowercase : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __A=None , __A=None , **__A ) -> str:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
_lowerCAmelCase =kwargs.pop('feature_extractor' )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> Optional[int]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
_lowerCAmelCase =self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
return self.image_processor
| 58
| 0
|
'''simple docstring'''
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_lowerCAmelCase =str(bin(_snake_case ) )[2:] # remove the leading "0b"
_lowerCAmelCase =str(bin(_snake_case ) )[2:] # remove the leading "0b"
_lowerCAmelCase =max(len(_snake_case ) , len(_snake_case ) )
return "0b" + "".join(
str(int(char_a == '1' and char_b == '1' ) )
for char_a, char_b in zip(a_binary.zfill(_snake_case ) , b_binary.zfill(_snake_case ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str:
super().__init__()
_lowerCAmelCase =nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
_lowerCAmelCase =nn.Embedding(__A , __A )
_lowerCAmelCase =False
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
_lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Any:
_lowerCAmelCase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase =torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowerCAmelCase =self.position_encoding(__A )
_lowerCAmelCase =self.continuous_inputs_projection(__A )
inputs += position_encodings
_lowerCAmelCase =self.dropout(__A )
# decoder: No padding present.
_lowerCAmelCase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase =lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
_lowerCAmelCase =self.decoder_norm(__A )
_lowerCAmelCase =self.post_dropout(__A )
_lowerCAmelCase =self.spec_out(__A )
return spec_out
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any:
_lowerCAmelCase =self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
_lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase =self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase =self.layer[-1](__A , __A )
return (hidden_states,)
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]:
# pre_self_attention_layer_norm
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.FiLMLayer(__A , __A )
# Self-attention block
_lowerCAmelCase =self.attention(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]:
super().__init__()
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple:
_lowerCAmelCase =self.layer_norm(__A )
_lowerCAmelCase =self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return layer_output
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.film(__A , __A )
_lowerCAmelCase =self.DenseReluDense(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(__A )
_lowerCAmelCase =NewGELUActivation()
def UpperCamelCase__ ( self , __A ) -> List[Any]:
_lowerCAmelCase =self.act(self.wi_a(__A ) )
_lowerCAmelCase =self.wi_a(__A )
_lowerCAmelCase =hidden_gelu * hidden_linear
_lowerCAmelCase =self.dropout(__A )
_lowerCAmelCase =self.wo(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A=1E-6 ) -> int:
super().__init__()
_lowerCAmelCase =nn.Parameter(torch.ones(__A ) )
_lowerCAmelCase =eps
def UpperCamelCase__ ( self , __A ) -> Dict:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_lowerCAmelCase =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__A )
_lowerCAmelCase =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase =hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def UpperCamelCase__ ( self , __A ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(__A , 3.0 )) ))
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , out_features * 2 , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.scale_bias(__A )
_lowerCAmelCase , _lowerCAmelCase =torch.chunk(__A , 2 , -1 )
_lowerCAmelCase =x * (1 + scale) + shift
return x
| 58
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'hidden_sizes' ) )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'neck_hidden_sizes' ) )
self.parent.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'num_attention_heads' ) )
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=32 , __A=2 , __A=3 , __A=640 , __A=4 , __A="silu" , __A=3 , __A=32 , __A=0.1 , __A=0.1 , __A=0.1 , __A=0.02 , __A=True , __A=True , __A=10 , __A=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =patch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =last_hidden_size
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =conv_kernel_size
_lowerCAmelCase =output_stride
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =classifier_dropout_prob
_lowerCAmelCase =use_labels
_lowerCAmelCase =is_training
_lowerCAmelCase =num_labels
_lowerCAmelCase =initializer_range
_lowerCAmelCase =scope
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase =None
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_lowerCAmelCase =self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase__ ( self ) -> List[str]:
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
_lowerCAmelCase =MobileViTModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_lowerCAmelCase =model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =MobileViTForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_lowerCAmelCase =model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Tuple:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =MobileViTForSemanticSegmentation(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_lowerCAmelCase =model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_lowerCAmelCase =model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
"""simple docstring"""
lowercase : Union[str, Any] = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase : List[Any] = (
{
"feature-extraction": MobileViTModel,
"image-classification": MobileViTForImageClassification,
"image-segmentation": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase : List[str] = False
lowercase : Union[str, Any] = False
lowercase : List[Any] = False
lowercase : Optional[int] = False
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =MobileViTModelTester(self )
_lowerCAmelCase =MobileViTConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds' )
def UpperCamelCase__ ( self ) -> int:
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings' )
def UpperCamelCase__ ( self ) -> Dict:
pass
@unittest.skip(reason='MobileViT does not output attentions' )
def UpperCamelCase__ ( self ) -> Tuple:
pass
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase =[*signature.parameters.keys()]
_lowerCAmelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( self ) -> List[str]:
def check_hidden_states_output(__A , __A , __A ):
_lowerCAmelCase =model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
_lowerCAmelCase =model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
_lowerCAmelCase =outputs.hidden_states
_lowerCAmelCase =5
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_lowerCAmelCase =2
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase =True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_SCREAMING_SNAKE_CASE )
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =MobileViTModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self ) -> List[str]:
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None
@slow
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**_SCREAMING_SNAKE_CASE )
# verify the logits
_lowerCAmelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase =torch.tensor([-1.9_364, -1.2_327, -0.4_653] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
_lowerCAmelCase =model.to(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase =MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**_SCREAMING_SNAKE_CASE )
_lowerCAmelCase =outputs.logits
# verify the logits
_lowerCAmelCase =torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase =torch.tensor(
[
[[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9_868, -9.7_132], [-11.0405, -11.0221, -10.7318]],
[[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]],
] , device=_SCREAMING_SNAKE_CASE , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
_lowerCAmelCase =model.to(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase =MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
_lowerCAmelCase =model(**_SCREAMING_SNAKE_CASE )
_lowerCAmelCase =outputs.logits.detach().cpu()
_lowerCAmelCase =image_processor.post_process_semantic_segmentation(outputs=_SCREAMING_SNAKE_CASE , target_sizes=[(50, 60)] )
_lowerCAmelCase =torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _SCREAMING_SNAKE_CASE )
_lowerCAmelCase =image_processor.post_process_semantic_segmentation(outputs=_SCREAMING_SNAKE_CASE )
_lowerCAmelCase =torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _SCREAMING_SNAKE_CASE )
| 708
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return TrainCommand(a__ )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( __A ) -> Tuple:
_lowerCAmelCase =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__A , required=__A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__A , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__A , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__A , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__A , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=__A , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=__A , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__A , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__A , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__A , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__A , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__A , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self , __A ) -> List[str]:
_lowerCAmelCase =logging.get_logger('transformers-cli/training' )
_lowerCAmelCase ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__A )
_lowerCAmelCase =args.output
_lowerCAmelCase =args.column_label
_lowerCAmelCase =args.column_text
_lowerCAmelCase =args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =args.validation_split
_lowerCAmelCase =args.train_batch_size
_lowerCAmelCase =args.valid_batch_size
_lowerCAmelCase =args.learning_rate
_lowerCAmelCase =args.adam_epsilon
def UpperCamelCase__ ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
raise NotImplementedError
def UpperCamelCase__ ( self ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 58
| 0
|
'''simple docstring'''
from manim import *
class SCREAMING_SNAKE_CASE ( _A):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =Rectangle(height=0.5 , width=0.5 )
_lowerCAmelCase =Rectangle(height=0.25 , width=0.25 )
_lowerCAmelCase =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =Text('CPU' , font_size=24 )
_lowerCAmelCase =Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase__ )
_lowerCAmelCase =[mem.copy() for i in range(4 )]
_lowerCAmelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =Text('GPU' , font_size=24 )
_lowerCAmelCase =Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(UpperCamelCase__ )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =Text('Model' , font_size=24 )
_lowerCAmelCase =Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
model.move_to([3, -1.0, 0] )
self.add(UpperCamelCase__ )
_lowerCAmelCase =[]
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for i, rect in enumerate(UpperCamelCase__ ):
rect.set_stroke(UpperCamelCase__ )
_lowerCAmelCase =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=UpperCamelCase__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCamelCase__ , buff=0.0 )
self.add(UpperCamelCase__ )
model_cpu_arr.append(UpperCamelCase__ )
self.add(*UpperCamelCase__ , *UpperCamelCase__ , *UpperCamelCase__ )
_lowerCAmelCase =[mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =Text('Loaded Checkpoint' , font_size=24 )
_lowerCAmelCase =Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(UpperCamelCase__ )
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for i, rect in enumerate(UpperCamelCase__ ):
_lowerCAmelCase =fill.copy().set_fill(UpperCamelCase__ , opacity=0.7 )
target.move_to(UpperCamelCase__ )
ckpt_arr.append(UpperCamelCase__ )
_lowerCAmelCase =target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(UpperCamelCase__ )
self.add(*UpperCamelCase__ , *UpperCamelCase__ )
_lowerCAmelCase =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_lowerCAmelCase =MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCamelCase__ , UpperCamelCase__ )
_lowerCAmelCase =MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(UpperCamelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCamelCase__ )
_lowerCAmelCase =MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
_lowerCAmelCase =[meta_mem.copy() for i in range(6 )]
_lowerCAmelCase =[meta_mem.copy() for i in range(6 )]
_lowerCAmelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
_lowerCAmelCase =Text('Disk' , font_size=24 )
_lowerCAmelCase =Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(UpperCamelCase__ , run_time=3 ) , Write(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) )
_lowerCAmelCase =[]
for i, rect in enumerate(UpperCamelCase__ ):
_lowerCAmelCase =rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(UpperCamelCase__ , run_time=1.5 ) )
self.play(*UpperCamelCase__ )
self.play(FadeOut(UpperCamelCase__ ) )
_lowerCAmelCase =MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase__ , run_time=3 ) )
self.play(
FadeOut(UpperCamelCase__ , UpperCamelCase__ , *UpperCamelCase__ , *UpperCamelCase__ ) , )
self.wait()
| 709
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
import time
lowercase_ = list[tuple[int, int]]
lowercase_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowercase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> List[Any]:
_lowerCAmelCase =pos_x
_lowerCAmelCase =pos_y
_lowerCAmelCase =(pos_y, pos_x)
_lowerCAmelCase =goal_x
_lowerCAmelCase =goal_y
_lowerCAmelCase =parent
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A ) -> Optional[int]:
_lowerCAmelCase =Node(start[1] , start[0] , goal[1] , goal[0] , _A )
_lowerCAmelCase =Node(goal[1] , goal[0] , goal[1] , goal[0] , _A )
_lowerCAmelCase =[self.start]
_lowerCAmelCase =False
def UpperCamelCase__ ( self ) -> Optional[int]:
while self.node_queue:
_lowerCAmelCase =self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
_lowerCAmelCase =True
return self.retrace_path(_A )
_lowerCAmelCase =self.get_successors(_A )
for node in successors:
self.node_queue.append(_A )
if not self.reached:
return [self.start.pos]
return None
def UpperCamelCase__ ( self , __A ) -> int:
_lowerCAmelCase =[]
for action in delta:
_lowerCAmelCase =parent.pos_x + action[1]
_lowerCAmelCase =parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(_A , _A , self.target.pos_y , self.target.pos_x , _A ) )
return successors
def UpperCamelCase__ ( self , __A ) -> Union[str, Any]:
_lowerCAmelCase =node
_lowerCAmelCase =[]
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
_lowerCAmelCase =current_node.parent
path.reverse()
return path
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase =BreadthFirstSearch(_A , _A )
_lowerCAmelCase =BreadthFirstSearch(_A , _A )
_lowerCAmelCase =False
def UpperCamelCase__ ( self ) -> str:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
_lowerCAmelCase =self.fwd_bfs.node_queue.pop(0 )
_lowerCAmelCase =self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
_lowerCAmelCase =True
return self.retrace_bidirectional_path(
_A , _A )
_lowerCAmelCase =current_bwd_node
_lowerCAmelCase =current_fwd_node
_lowerCAmelCase ={
self.fwd_bfs: self.fwd_bfs.get_successors(_A ),
self.bwd_bfs: self.bwd_bfs.get_successors(_A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(_A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCamelCase__ ( self , __A , __A ) -> Dict:
_lowerCAmelCase =self.fwd_bfs.retrace_path(_A )
_lowerCAmelCase =self.bwd_bfs.retrace_path(_A )
bwd_path.pop()
bwd_path.reverse()
_lowerCAmelCase =fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowercase_ = (0, 0)
lowercase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowercase_ = time.time()
lowercase_ = BreadthFirstSearch(init, goal)
lowercase_ = bfs.search()
lowercase_ = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
lowercase_ = time.time()
lowercase_ = BidirectionalBreadthFirstSearch(init, goal)
lowercase_ = bd_bfs.search()
lowercase_ = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 710
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 58
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'efficientnet'
def __init__( self , __A = 3 , __A = 600 , __A = 2.0 , __A = 3.1 , __A = 8 , __A = [3, 3, 5, 3, 5, 5, 3] , __A = [32, 16, 24, 40, 80, 112, 192] , __A = [16, 24, 40, 80, 112, 192, 320] , __A = [] , __A = [1, 2, 2, 2, 1, 2, 1] , __A = [1, 2, 2, 3, 3, 4, 1] , __A = [1, 6, 6, 6, 6, 6, 6] , __A = 0.25 , __A = "swish" , __A = 2560 , __A = "mean" , __A = 0.02 , __A = 0.001 , __A = 0.99 , __A = 0.5 , __A = 0.2 , **__A , ) -> Optional[int]:
super().__init__(**__a )
_lowerCAmelCase =num_channels
_lowerCAmelCase =image_size
_lowerCAmelCase =width_coefficient
_lowerCAmelCase =depth_coefficient
_lowerCAmelCase =depth_divisor
_lowerCAmelCase =kernel_sizes
_lowerCAmelCase =in_channels
_lowerCAmelCase =out_channels
_lowerCAmelCase =depthwise_padding
_lowerCAmelCase =strides
_lowerCAmelCase =num_block_repeats
_lowerCAmelCase =expand_ratios
_lowerCAmelCase =squeeze_expansion_ratio
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dim
_lowerCAmelCase =pooling_type
_lowerCAmelCase =initializer_range
_lowerCAmelCase =batch_norm_eps
_lowerCAmelCase =batch_norm_momentum
_lowerCAmelCase =dropout_rate
_lowerCAmelCase =drop_connect_rate
_lowerCAmelCase =sum(__a ) * 4
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = version.parse('1.11')
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCamelCase__ ( self ) -> float:
return 1E-5
| 711
|
'''simple docstring'''
def UpperCamelCase__ ( a__ = 1_0_0_0 ):
'''simple docstring'''
_lowerCAmelCase =2**power
_lowerCAmelCase =0
while n:
_lowerCAmelCase , _lowerCAmelCase =r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 58
| 0
|
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def UpperCamelCase__ ( ):
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase ="""mock-s3-bucket"""
_lowerCAmelCase =F'''s3://{mock_bucket}'''
_lowerCAmelCase =extract_path_from_uri(lowerCamelCase_ )
assert dataset_path.startswith('s3://' ) is False
_lowerCAmelCase ="""./local/path"""
_lowerCAmelCase =extract_path_from_uri(lowerCamelCase_ )
assert dataset_path == new_dataset_path
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =is_remote_filesystem(lowerCamelCase_ )
assert is_remote is True
_lowerCAmelCase =fsspec.filesystem('file' )
_lowerCAmelCase =is_remote_filesystem(lowerCamelCase_ )
assert is_remote is False
@pytest.mark.parametrize('compression_fs_class' , lowerCamelCase_ )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase ={"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
_lowerCAmelCase =input_paths[compression_fs_class.protocol]
if input_path is None:
_lowerCAmelCase =F'''for \'{compression_fs_class.protocol}\' compression protocol, '''
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase_ )
_lowerCAmelCase =fsspec.filesystem(compression_fs_class.protocol , fo=lowerCamelCase_ )
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
_lowerCAmelCase =os.path.basename(lowerCamelCase_ )
_lowerCAmelCase =expected_filename[: expected_filename.rindex('.' )]
assert fs.glob('*' ) == [expected_filename]
with fs.open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f, open(lowerCamelCase_ , encoding='utf-8' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('protocol' , ['zip', 'gzip'] )
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase ={"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
_lowerCAmelCase =compressed_file_paths[protocol]
_lowerCAmelCase ="""dataset.jsonl"""
_lowerCAmelCase =F'''{protocol}://{member_file_path}::{compressed_file_path}'''
_lowerCAmelCase =fsspec.get_fs_token_paths(lowerCamelCase_ )
assert fs.isfile(lowerCamelCase_ )
assert not fs.isfile('non_existing_' + member_file_path )
@pytest.mark.integration
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =hf_api.dataset_info(lowerCamelCase_ , token=lowerCamelCase_ )
_lowerCAmelCase =HfFileSystem(repo_info=lowerCamelCase_ , token=lowerCamelCase_ )
assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"]
assert hffs.isdir('data' )
assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' )
with open(lowerCamelCase_ ) as f:
assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read()
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase ="""bz2"""
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(lowerCamelCase_ , lowerCamelCase_ , clobber=lowerCamelCase_ )
with pytest.warns(lowerCamelCase_ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(lowerCamelCase_ ) == 1
assert (
str(warning_info[0].message )
== F'''A filesystem protocol was already set for {protocol} and will be overwritten.'''
)
| 712
|
'''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =set()
# To detect a back edge, keep track of vertices currently in the recursion stack
_lowerCAmelCase =set()
return any(
node not in visited and depth_first_search(a__ , a__ , a__ , a__ )
for node in graph )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
visited.add(a__ )
rec_stk.add(a__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a__ , a__ , a__ , a__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58
| 0
|
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
lowercase_ = logging.get_logger(__name__)
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
def run_func(a__ ):
@wraps(A__ )
def run_in_eager_mode(*a__ , **a__ ):
return func(*A__ , **A__ )
@wraps(A__ )
@tf.function(experimental_compile=A__ )
def run_in_graph_mode(*a__ , **a__ ):
return func(*A__ , **A__ )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =random.Random()
_lowerCAmelCase =[rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(A__ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class SCREAMING_SNAKE_CASE ( __lowerCamelCase):
"""simple docstring"""
lowercase : TensorFlowBenchmarkArguments
lowercase : PretrainedConfig
lowercase : str = "TensorFlow"
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
return tf.__version__
def UpperCamelCase__ ( self , __A , __A , __A ) -> Union[str, Any]:
# initialize GPU on separate process
_lowerCAmelCase =self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_lowerCAmelCase =self._prepare_inference_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return self._measure_speed(_inference )
def UpperCamelCase__ ( self , __A , __A , __A ) -> List[str]:
_lowerCAmelCase =self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_lowerCAmelCase =self._prepare_train_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return self._measure_speed(_train )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Tuple:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase_ )
_lowerCAmelCase =self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_lowerCAmelCase =self._prepare_inference_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return self._measure_memory(_inference )
def UpperCamelCase__ ( self , __A , __A , __A ) -> List[str]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , UpperCamelCase_ )
_lowerCAmelCase =self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
_lowerCAmelCase =self._prepare_train_func(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return self._measure_memory(_train )
def UpperCamelCase__ ( self , __A , __A , __A ) -> List[Any]:
_lowerCAmelCase =self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
_lowerCAmelCase =(
hasattr(UpperCamelCase_ , 'architectures' )
and isinstance(config.architectures , UpperCamelCase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase ='TF' + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase =__import__('transformers' , fromlist=[model_class] )
_lowerCAmelCase =getattr(UpperCamelCase_ , UpperCamelCase_ )
_lowerCAmelCase =model_cls(UpperCamelCase_ )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
_lowerCAmelCase =TF_MODEL_MAPPING[config.__class__](UpperCamelCase_ )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase =config.vocab_size if hasattr(UpperCamelCase_ , 'vocab_size' ) else config.encoder.vocab_size
_lowerCAmelCase =random_input_ids(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ , training=UpperCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(UpperCamelCase_ , training=UpperCamelCase_ )
_lowerCAmelCase =encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def UpperCamelCase__ ( self , __A , __A , __A ) -> Any:
_lowerCAmelCase =self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.' )
_lowerCAmelCase =(
hasattr(UpperCamelCase_ , 'architectures' )
and isinstance(config.architectures , UpperCamelCase_ )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_lowerCAmelCase ='TF' + config.architectures[0] # prepend 'TF' for tensorflow model
_lowerCAmelCase =__import__('transformers' , fromlist=[model_class] )
_lowerCAmelCase =getattr(UpperCamelCase_ , UpperCamelCase_ )
_lowerCAmelCase =model_cls(UpperCamelCase_ )
except ImportError:
raise ImportError(
F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
else:
_lowerCAmelCase =TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](UpperCamelCase_ )
# encoder-decoder has vocab size saved differently
_lowerCAmelCase =config.vocab_size if hasattr(UpperCamelCase_ , 'vocab_size' ) else config.encoder.vocab_size
_lowerCAmelCase =random_input_ids(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_lowerCAmelCase =model(UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ )[0]
_lowerCAmelCase =tf.gradients(UpperCamelCase_ , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_lowerCAmelCase =model(UpperCamelCase_ , labels=UpperCamelCase_ , training=UpperCamelCase_ )[0]
_lowerCAmelCase =tf.gradients(UpperCamelCase_ , model.trainable_variables )
return gradients
_lowerCAmelCase =encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def UpperCamelCase__ ( self , __A ) -> Union[str, Any]:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
timeit.repeat(UpperCamelCase_ , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_lowerCAmelCase =timeit.repeat(
UpperCamelCase_ , repeat=self.args.repeat , number=10 , )
return min(UpperCamelCase_ ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
def UpperCamelCase__ ( self , __A ) -> List[Any]:
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.' )
_lowerCAmelCase =start_memory_tracing('transformers' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.' )
_lowerCAmelCase ='N/A'
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.' )
# init nvml
nvml.nvmlInit()
func()
_lowerCAmelCase =nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_lowerCAmelCase =nvml.nvmlDeviceGetMemoryInfo(UpperCamelCase_ )
_lowerCAmelCase =meminfo.used
_lowerCAmelCase =Memory(UpperCamelCase_ )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.' )
_lowerCAmelCase =None
else:
_lowerCAmelCase =measure_peak_memory_cpu(UpperCamelCase_ )
_lowerCAmelCase =Memory(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else memory_bytes
if self.args.trace_memory_line_by_line:
_lowerCAmelCase =stop_memory_tracing(UpperCamelCase_ )
if memory is None:
_lowerCAmelCase =summary.total
else:
_lowerCAmelCase =None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
return "N/A", None
| 713
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Tuple = 'blip_2_vision_model'
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-10 , __A=True , **__A , ) -> int:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =patch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =hidden_act
_lowerCAmelCase =qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'blip_2_qformer'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> List[str]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =cross_attention_frequency
_lowerCAmelCase =encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'blip-2'
lowercase : Any = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ) -> int:
super().__init__(**__A )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_lowerCAmelCase ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowerCAmelCase =BlipaVisionConfig(**__A )
_lowerCAmelCase =BlipaQFormerConfig(**__A )
_lowerCAmelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowerCAmelCase =CONFIG_MAPPING[text_model_type](**__A )
_lowerCAmelCase =self.text_config.tie_word_embeddings
_lowerCAmelCase =self.text_config.is_encoder_decoder
_lowerCAmelCase =num_query_tokens
_lowerCAmelCase =self.vision_config.hidden_size
_lowerCAmelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCAmelCase =1.0
_lowerCAmelCase =0.02
@classmethod
def UpperCamelCase__ ( cls , __A , __A , __A , **__A , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =copy.deepcopy(self.__dict__ )
_lowerCAmelCase =self.vision_config.to_dict()
_lowerCAmelCase =self.qformer_config.to_dict()
_lowerCAmelCase =self.text_config.to_dict()
_lowerCAmelCase =self.__class__.model_type
return output
| 58
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( _UpperCamelCase , unittest.TestCase):
"""simple docstring"""
lowercase : Optional[Any] = GPTaTokenizer
lowercase : int = GPTaTokenizerFast
lowercase : int = True
lowercase : Any = {'add_prefix_space': True}
lowercase : Optional[Any] = False
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase =[
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
_lowerCAmelCase =dict(zip(__A , range(len(__A ) ) ) )
_lowerCAmelCase =["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
_lowerCAmelCase ={"unk_token": "<unk>"}
_lowerCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__A ) )
def UpperCamelCase__ ( self , **__A ) -> int:
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **__A )
def UpperCamelCase__ ( self , **__A ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **__A )
def UpperCamelCase__ ( self , __A ) -> Any:
_lowerCAmelCase ="lower newer"
_lowerCAmelCase ="lower newer"
return input_text, output_text
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase ="lower newer"
_lowerCAmelCase =["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
_lowerCAmelCase =tokenizer.tokenize(__A , add_prefix_space=__A )
self.assertListEqual(__A , __A )
_lowerCAmelCase =tokens + [tokenizer.unk_token]
_lowerCAmelCase =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def UpperCamelCase__ ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
_lowerCAmelCase =self.get_tokenizer()
_lowerCAmelCase =self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCAmelCase ="lower newer"
# Testing tokenization
_lowerCAmelCase =tokenizer.tokenize(__A , add_prefix_space=__A )
_lowerCAmelCase =rust_tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids without special tokens
_lowerCAmelCase =tokenizer.encode(__A , add_special_tokens=__A , add_prefix_space=__A )
_lowerCAmelCase =rust_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
# Testing conversion to ids with special tokens
_lowerCAmelCase =self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCAmelCase =tokenizer.encode(__A , add_prefix_space=__A )
_lowerCAmelCase =rust_tokenizer.encode(__A )
self.assertListEqual(__A , __A )
# Testing the unknown token
_lowerCAmelCase =tokens + [rust_tokenizer.unk_token]
_lowerCAmelCase =[14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__A ) , __A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Tuple:
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def UpperCamelCase__ ( self , __A=15 ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCAmelCase =self.rust_tokenizer_class.from_pretrained(__A , **__A )
# Simple input
_lowerCAmelCase ="This is a simple input"
_lowerCAmelCase =["This is a simple input 1", "This is a simple input 2"]
_lowerCAmelCase =("This is a simple input", "This is a pair")
_lowerCAmelCase =[
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__A , tokenizer_r.encode , __A , max_length=__A , padding='max_length' )
# Simple input
self.assertRaises(__A , tokenizer_r.encode_plus , __A , max_length=__A , padding='max_length' )
# Simple input
self.assertRaises(
__A , tokenizer_r.batch_encode_plus , __A , max_length=__A , padding='max_length' , )
# Pair input
self.assertRaises(__A , tokenizer_r.encode , __A , max_length=__A , padding='max_length' )
# Pair input
self.assertRaises(__A , tokenizer_r.encode_plus , __A , max_length=__A , padding='max_length' )
# Pair input
self.assertRaises(
__A , tokenizer_r.batch_encode_plus , __A , max_length=__A , padding='max_length' , )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
_lowerCAmelCase ="This is a simple input"
_lowerCAmelCase =["This is a simple input looooooooong", "This is a simple input"]
_lowerCAmelCase =("This is a simple input", "This is a pair")
_lowerCAmelCase =[
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_lowerCAmelCase =tokenizer.pad_token_id
_lowerCAmelCase =tokenizer(__A , padding='max_length' , max_length=30 , return_tensors='np' )
_lowerCAmelCase =tokenizer(__A , padding=__A , truncate=__A , return_tensors='np' )
_lowerCAmelCase =tokenizer(*__A , padding='max_length' , max_length=60 , return_tensors='np' )
_lowerCAmelCase =tokenizer(__A , padding=__A , truncate=__A , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase ="$$$"
_lowerCAmelCase =GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=__A , add_bos_token=__A )
_lowerCAmelCase ="This is a simple input"
_lowerCAmelCase =["This is a simple input 1", "This is a simple input 2"]
_lowerCAmelCase =tokenizer.bos_token_id
_lowerCAmelCase =tokenizer(__A )
_lowerCAmelCase =tokenizer(__A )
self.assertEqual(out_s.input_ids[0] , __A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCAmelCase =tokenizer.decode(out_s.input_ids )
_lowerCAmelCase =tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def UpperCamelCase__ ( self ) -> int:
pass
def UpperCamelCase__ ( self ) -> Union[str, Any]:
# TODO: change to self.get_tokenizers() when the fast version is implemented
_lowerCAmelCase =[self.get_tokenizer(do_lower_case=__A , add_bos_token=__A )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowerCAmelCase ="Encode this."
_lowerCAmelCase ="This one too please."
_lowerCAmelCase =tokenizer.encode(__A , add_special_tokens=__A )
encoded_sequence += tokenizer.encode(__A , add_special_tokens=__A )
_lowerCAmelCase =tokenizer.encode_plus(
__A , __A , add_special_tokens=__A , return_special_tokens_mask=__A , )
_lowerCAmelCase =encoded_sequence_dict["input_ids"]
_lowerCAmelCase =encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(__A ) , len(__A ) )
_lowerCAmelCase =[
(x if not special_tokens_mask[i] else None) for i, x in enumerate(__A )
]
_lowerCAmelCase =[x for x in filtered_sequence if x is not None]
self.assertEqual(__A , __A )
@require_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Tuple:
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
_lowerCAmelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=__A )
_lowerCAmelCase ="A photo of a cat"
_lowerCAmelCase =tokenizer.encode(
__A , )
self.assertEqual(__A , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
_lowerCAmelCase =AutoTokenizer.from_pretrained('./test_opt' )
_lowerCAmelCase =tokenizer.encode(
__A , )
self.assertEqual(__A , [2, 250, 1345, 9, 10, 4758] )
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=__A )
_lowerCAmelCase ="A photo of a cat"
_lowerCAmelCase =tokenizer.encode(
__A , )
# Same as above
self.assertEqual(__A , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=__A )
_lowerCAmelCase ="bos"
_lowerCAmelCase =tokenizer.get_vocab()["bos"]
_lowerCAmelCase ="A photo of a cat"
_lowerCAmelCase =tokenizer.encode(
__A , )
# We changed the bos token
self.assertEqual(__A , [3_1957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
_lowerCAmelCase =AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
_lowerCAmelCase =tokenizer.encode(
__A , )
self.assertEqual(__A , [3_1957, 250, 1345, 9, 10, 4758] )
| 714
|
'''simple docstring'''
lowercase_ = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowercase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase ='Morse code here!'
print(a__ )
_lowerCAmelCase =encrypt(a__ )
print(a__ )
_lowerCAmelCase =decrypt(a__ )
print(a__ )
if __name__ == "__main__":
main()
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A=2 , __A=3 , __A=4 , __A=2 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=36 , __A=2 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.02 , __A=6 , __A=6 , __A=3 , __A=4 , __A=None , __A=1000 , ) -> List[Any]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =image_size
_lowerCAmelCase =patch_size
_lowerCAmelCase =is_training
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_token_type_ids
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =type_sequence_label_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =coordinate_size
_lowerCAmelCase =shape_size
_lowerCAmelCase =num_labels
_lowerCAmelCase =num_choices
_lowerCAmelCase =scope
_lowerCAmelCase =range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
_lowerCAmelCase =text_seq_length
_lowerCAmelCase =(image_size // patch_size) ** 2 + 1
_lowerCAmelCase =self.text_seq_length + self.image_seq_length
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
_lowerCAmelCase =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
_lowerCAmelCase =bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_lowerCAmelCase =bbox[i, j, 3]
_lowerCAmelCase =bbox[i, j, 1]
_lowerCAmelCase =tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
_lowerCAmelCase =bbox[i, j, 2]
_lowerCAmelCase =bbox[i, j, 0]
_lowerCAmelCase =tmp_coordinate
_lowerCAmelCase =tf.constant(lowercase_ )
_lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.text_seq_length] )
_lowerCAmelCase =None
if self.use_token_type_ids:
_lowerCAmelCase =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
_lowerCAmelCase =None
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
_lowerCAmelCase =LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A ) -> Dict:
_lowerCAmelCase =TFLayoutLMvaModel(config=lowercase_ )
# text + image
_lowerCAmelCase =model(lowercase_ , pixel_values=lowercase_ , training=lowercase_ )
_lowerCAmelCase =model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , training=lowercase_ , )
_lowerCAmelCase =model(lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , training=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
_lowerCAmelCase =model(lowercase_ , training=lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
_lowerCAmelCase =model({'pixel_values': pixel_values} , training=lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> Dict:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =TFLayoutLMvaForSequenceClassification(config=lowercase_ )
_lowerCAmelCase =model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , training=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> str:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =TFLayoutLMvaForTokenClassification(config=lowercase_ )
_lowerCAmelCase =model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , training=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[int]:
_lowerCAmelCase =2
_lowerCAmelCase =TFLayoutLMvaForQuestionAnswering(config=lowercase_ )
_lowerCAmelCase =model(
lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , training=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.prepare_config_and_inputs()
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) =config_and_inputs
_lowerCAmelCase ={
'input_ids': input_ids,
'bbox': bbox,
'pixel_values': pixel_values,
'token_type_ids': token_type_ids,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : Any = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowercase : List[Any] = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowercase : List[Any] = False
lowercase : List[Any] = False
lowercase : List[Any] = False
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A ) -> Optional[int]:
return True
def UpperCamelCase__ ( self , __A , __A , __A=False ) -> dict:
_lowerCAmelCase =copy.deepcopy(lowercase_ )
if model_class in get_values(lowercase_ ):
_lowerCAmelCase ={
k: tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowercase_ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowercase_ ):
_lowerCAmelCase =tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowercase_ ):
_lowerCAmelCase =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
_lowerCAmelCase =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowercase_ ):
_lowerCAmelCase =tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowercase_ ):
_lowerCAmelCase =tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =TFLayoutLMvaModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def UpperCamelCase__ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(lowercase_ )
if getattr(lowercase_ , 'hf_compute_loss' , lowercase_ ):
# The number of elements in the loss should be the same as the number of elements in the label
_lowerCAmelCase =self._prepare_for_class(inputs_dict.copy() , lowercase_ , return_labels=lowercase_ )
_lowerCAmelCase =prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowercase_ )[0]
]
_lowerCAmelCase =added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
_lowerCAmelCase =self._prepare_for_class(inputs_dict.copy() , lowercase_ , return_labels=lowercase_ )
_lowerCAmelCase =prepared_for_class.pop('input_ids' )
_lowerCAmelCase =model(lowercase_ , **lowercase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
_lowerCAmelCase =self._prepare_for_class(inputs_dict.copy() , lowercase_ , return_labels=lowercase_ )
_lowerCAmelCase =prepared_for_class.pop('input_ids' )
if "labels" in prepared_for_class:
_lowerCAmelCase =prepared_for_class['labels'].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
_lowerCAmelCase =-100
_lowerCAmelCase =tf.convert_to_tensor(lowercase_ )
_lowerCAmelCase =model(lowercase_ , **lowercase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
_lowerCAmelCase =self._prepare_for_class(inputs_dict.copy() , lowercase_ , return_labels=lowercase_ )
_lowerCAmelCase =model(lowercase_ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
_lowerCAmelCase =self._prepare_for_class(inputs_dict.copy() , lowercase_ , return_labels=lowercase_ )
# Get keys that were added with the _prepare_for_class function
_lowerCAmelCase =prepared_for_class.keys() - inputs_dict.keys()
_lowerCAmelCase =inspect.signature(model.call ).parameters
_lowerCAmelCase =list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
_lowerCAmelCase ={0: 'input_ids'}
for label_key in label_keys:
_lowerCAmelCase =signature_names.index(lowercase_ )
_lowerCAmelCase =label_key
_lowerCAmelCase =sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
_lowerCAmelCase =[]
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
_lowerCAmelCase =prepared_for_class[value]
_lowerCAmelCase =tuple(lowercase_ )
# Send to model
_lowerCAmelCase =model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def UpperCamelCase__ ( self ) -> Optional[int]:
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ) -> Dict:
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowerCAmelCase =type
self.model_tester.create_and_check_model(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ) -> Optional[int]:
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ) -> Optional[int]:
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
def UpperCamelCase__ ( self ) -> int:
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =TFLayoutLMvaModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def UpperCamelCase__ ( ) -> Any:
'''simple docstring'''
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self ) -> Optional[int]:
return LayoutLMvaImageProcessor(apply_ocr=lowercase_ ) if is_vision_available() else None
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=lowercase_ , return_tensors='tf' ).pixel_values
_lowerCAmelCase =tf.constant([[1, 2]] )
_lowerCAmelCase =tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
_lowerCAmelCase =model(input_ids=lowercase_ , bbox=lowercase_ , pixel_values=lowercase_ , training=lowercase_ )
# verify the logits
_lowerCAmelCase =(1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , lowercase_ )
_lowerCAmelCase =tf.constant(
[[-0.0_529, 0.3_618, 0.1_632], [-0.1_587, -0.1_667, -0.0_400], [-0.1_557, -0.1_671, -0.0_505]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1E-4 ) )
| 715
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = 'data2vec-text'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 58
| 0
|
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
lowercase_ = parser.parse_args()
lowercase_ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 716
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = IFPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : int = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase__ ( self ) -> str:
return self._get_dummy_components()
def UpperCamelCase__ ( self , __A , __A=0 ) -> int:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ) -> str:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
# if
_lowerCAmelCase =IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase =IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__A , tokenizer=__A )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase =pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase =None
_lowerCAmelCase =None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase =IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase =IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__A , __A , __A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 58
| 0
|
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS}
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
_lowerCAmelCase =TOKENIZER_CLASSES
else:
_lowerCAmelCase ={tokenizer_name: getattr(a__ , tokenizer_name + 'Fast' )}
logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
_lowerCAmelCase =TOKENIZER_CLASSES[tokenizer_name]
_lowerCAmelCase =True
if checkpoint_name is None:
_lowerCAmelCase =list(tokenizer_class.max_model_input_sizes.keys() )
else:
_lowerCAmelCase =[checkpoint_name]
logger.info(F'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''' )
for checkpoint in checkpoint_names:
logger.info(F'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''' )
# Load tokenizer
_lowerCAmelCase =tokenizer_class.from_pretrained(a__ , force_download=a__ )
# Save fast tokenizer
logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
_lowerCAmelCase =checkpoint.split('/' )
_lowerCAmelCase =os.path.join(a__ , a__ )
elif add_prefix:
_lowerCAmelCase =checkpoint
_lowerCAmelCase =dump_path
else:
_lowerCAmelCase =None
_lowerCAmelCase =dump_path
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
_lowerCAmelCase =list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
_lowerCAmelCase =file_path.split(a__ )[-1][0]
if next_char == "/":
_lowerCAmelCase =os.path.join(a__ , a__ )
_lowerCAmelCase =None
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
_lowerCAmelCase =tokenizer.save_pretrained(
a__ , legacy_format=a__ , filename_prefix=a__ )
logger.info(F'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(a__ )
logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
f'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
lowercase_ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 717
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =[0]
_lowerCAmelCase =[0]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
_lowerCAmelCase =[60]
_lowerCAmelCase =[10]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =3
_lowerCAmelCase =[1, 2, 3]
_lowerCAmelCase =[3, 2, 1]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 5 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =50
_lowerCAmelCase =[60, 100, 120]
_lowerCAmelCase =[10, 20, 30]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 220 )
if __name__ == "__main__":
unittest.main()
| 58
| 0
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =F'''{sampling_rate}'''
_lowerCAmelCase ="1"
_lowerCAmelCase ="f32le"
_lowerCAmelCase =[
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(a__ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
_lowerCAmelCase =ffmpeg_process.communicate(a__ )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
_lowerCAmelCase =output_stream[0]
_lowerCAmelCase =np.frombuffer(a__ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def UpperCamelCase__ ( a__ , a__ , a__ = "f32le" , ):
'''simple docstring'''
_lowerCAmelCase =F'''{sampling_rate}'''
_lowerCAmelCase ="1"
if format_for_conversion == "s16le":
_lowerCAmelCase =2
elif format_for_conversion == "f32le":
_lowerCAmelCase =4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
_lowerCAmelCase =platform.system()
if system == "Linux":
_lowerCAmelCase ="alsa"
_lowerCAmelCase ="default"
elif system == "Darwin":
_lowerCAmelCase ="avfoundation"
_lowerCAmelCase =":0"
elif system == "Windows":
_lowerCAmelCase ="dshow"
_lowerCAmelCase ="default"
_lowerCAmelCase =[
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
_lowerCAmelCase =int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_lowerCAmelCase =_ffmpeg_stream(a__ , a__ )
for item in iterator:
yield item
def UpperCamelCase__ ( a__ , a__ , a__ = None , a__ = None , a__ = "f32le" , ):
'''simple docstring'''
if stream_chunk_s is not None:
_lowerCAmelCase =stream_chunk_s
else:
_lowerCAmelCase =chunk_length_s
_lowerCAmelCase =ffmpeg_microphone(a__ , a__ , format_for_conversion=a__ )
if format_for_conversion == "s16le":
_lowerCAmelCase =np.intaa
_lowerCAmelCase =2
elif format_for_conversion == "f32le":
_lowerCAmelCase =np.floataa
_lowerCAmelCase =4
else:
raise ValueError(F'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
_lowerCAmelCase =chunk_length_s / 6
_lowerCAmelCase =int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a__ , (int, float) ):
_lowerCAmelCase =[stride_length_s, stride_length_s]
_lowerCAmelCase =int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_lowerCAmelCase =int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_lowerCAmelCase =datetime.datetime.now()
_lowerCAmelCase =datetime.timedelta(seconds=a__ )
for item in chunk_bytes_iter(a__ , a__ , stride=(stride_left, stride_right) , stream=a__ ):
# Put everything back in numpy scale
_lowerCAmelCase =np.frombuffer(item['raw'] , dtype=a__ )
_lowerCAmelCase =(
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
_lowerCAmelCase =sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 1_0 * delta:
# We're late !! SKIP
continue
yield item
def UpperCamelCase__ ( a__ , a__ , a__ , a__ = False ):
'''simple docstring'''
_lowerCAmelCase =b""
_lowerCAmelCase =stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
_lowerCAmelCase =0
for raw in iterator:
acc += raw
if stream and len(a__ ) < chunk_len:
_lowerCAmelCase =(_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a__ ) >= chunk_len:
# We are flushing the accumulator
_lowerCAmelCase =(_stride_left, stride_right)
_lowerCAmelCase ={"raw": acc[:chunk_len], "stride": stride}
if stream:
_lowerCAmelCase =False
yield item
_lowerCAmelCase =stride_left
_lowerCAmelCase =acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a__ ) > stride_left:
_lowerCAmelCase ={"raw": acc, "stride": (_stride_left, 0)}
if stream:
_lowerCAmelCase =False
yield item
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =2**2_4 # 16Mo
try:
with subprocess.Popen(a__ , stdout=subprocess.PIPE , bufsize=a__ ) as ffmpeg_process:
while True:
_lowerCAmelCase =ffmpeg_process.stdout.read(a__ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 718
|
'''simple docstring'''
lowercase_ = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
lowercase_ = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE ( __lowerCamelCase):
"""simple docstring"""
lowercase : Optional[int] = 'tapas'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=1024 , __A=[3, 256, 256, 2, 256, 256, 10] , __A=0.02 , __A=1E-12 , __A=0 , __A=10.0 , __A=0 , __A=1.0 , __A=None , __A=1.0 , __A=False , __A=None , __A=1.0 , __A=1.0 , __A=False , __A=False , __A="ratio" , __A=None , __A=None , __A=64 , __A=32 , __A=False , __A=True , __A=False , __A=False , __A=True , __A=False , __A=None , __A=None , **__A , ) -> List[str]:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_sizes
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
# Fine-tuning task hyperparameters
_lowerCAmelCase =positive_label_weight
_lowerCAmelCase =num_aggregation_labels
_lowerCAmelCase =aggregation_loss_weight
_lowerCAmelCase =use_answer_as_supervision
_lowerCAmelCase =answer_loss_importance
_lowerCAmelCase =use_normalized_answer_loss
_lowerCAmelCase =huber_loss_delta
_lowerCAmelCase =temperature
_lowerCAmelCase =aggregation_temperature
_lowerCAmelCase =use_gumbel_for_cells
_lowerCAmelCase =use_gumbel_for_aggregation
_lowerCAmelCase =average_approximation_function
_lowerCAmelCase =cell_selection_preference
_lowerCAmelCase =answer_loss_cutoff
_lowerCAmelCase =max_num_rows
_lowerCAmelCase =max_num_columns
_lowerCAmelCase =average_logits_per_cell
_lowerCAmelCase =select_one_column
_lowerCAmelCase =allow_empty_column_selection
_lowerCAmelCase =init_cell_selection_weights_to_zero
_lowerCAmelCase =reset_position_index_per_cell
_lowerCAmelCase =disable_per_token_loss
# Aggregation hyperparameters
_lowerCAmelCase =aggregation_labels
_lowerCAmelCase =no_aggregation_label_index
if isinstance(self.aggregation_labels , SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase ={int(SCREAMING_SNAKE_CASE_ ): v for k, v in aggregation_labels.items()}
| 719
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase_ = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 58
| 0
|
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowercase_ = logging.get_logger(__name__)
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
def constraint_to_multiple_of(a__ , a__ , a__=0 , a__=None ):
_lowerCAmelCase =round(val / multiple ) * multiple
if max_val is not None and x > max_val:
_lowerCAmelCase =math.floor(val / multiple ) * multiple
if x < min_val:
_lowerCAmelCase =math.ceil(val / multiple ) * multiple
return x
_lowerCAmelCase =(output_size, output_size) if isinstance(a__ , a__ ) else output_size
_lowerCAmelCase =get_image_size(a__ )
_lowerCAmelCase =output_size
# determine new height and width
_lowerCAmelCase =output_height / input_height
_lowerCAmelCase =output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
_lowerCAmelCase =scale_width
else:
# fit height
_lowerCAmelCase =scale_height
_lowerCAmelCase =constraint_to_multiple_of(scale_height * input_height , multiple=a__ )
_lowerCAmelCase =constraint_to_multiple_of(scale_width * input_width , multiple=a__ )
return (new_height, new_width)
class SCREAMING_SNAKE_CASE ( _snake_case):
"""simple docstring"""
lowercase : Any = ["""pixel_values"""]
def __init__( self , __A = True , __A = None , __A = PILImageResampling.BILINEAR , __A = False , __A = 1 , __A = True , __A = 1 / 255 , __A = True , __A = None , __A = None , **__A , ) -> str:
super().__init__(**snake_case_ )
_lowerCAmelCase =size if size is not None else {"height": 384, "width": 384}
_lowerCAmelCase =get_size_dict(snake_case_ )
_lowerCAmelCase =do_resize
_lowerCAmelCase =size
_lowerCAmelCase =keep_aspect_ratio
_lowerCAmelCase =ensure_multiple_of
_lowerCAmelCase =resample
_lowerCAmelCase =do_rescale
_lowerCAmelCase =rescale_factor
_lowerCAmelCase =do_normalize
_lowerCAmelCase =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCAmelCase =image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCamelCase__ ( self , __A , __A , __A = False , __A = 1 , __A = PILImageResampling.BICUBIC , __A = None , **__A , ) -> Optional[int]:
_lowerCAmelCase =get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
_lowerCAmelCase =get_resize_output_image_size(
snake_case_ , output_size=(size['height'], size['width']) , keep_aspect_ratio=snake_case_ , multiple=snake_case_ , )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def UpperCamelCase__ ( self , __A , __A , __A = None , **__A , ) -> List[str]:
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def UpperCamelCase__ ( self , __A , __A , __A , __A = None , **__A , ) -> List[Any]:
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def UpperCamelCase__ ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> Any:
_lowerCAmelCase =do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase =size if size is not None else self.size
_lowerCAmelCase =get_size_dict(snake_case_ )
_lowerCAmelCase =keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
_lowerCAmelCase =ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
_lowerCAmelCase =resample if resample is not None else self.resample
_lowerCAmelCase =do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase =do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase =image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase =image_std if image_std is not None else self.image_std
_lowerCAmelCase =make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase =[to_numpy_array(snake_case_ ) for image in images]
if do_resize:
_lowerCAmelCase =[self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_rescale:
_lowerCAmelCase =[self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
if do_normalize:
_lowerCAmelCase =[self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
_lowerCAmelCase =[to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
_lowerCAmelCase ={"pixel_values": images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
def UpperCamelCase__ ( self , __A , __A = None ) -> int:
_lowerCAmelCase =outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(snake_case_ ) != len(snake_case_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(snake_case_ ):
_lowerCAmelCase =target_sizes.numpy()
_lowerCAmelCase =[]
for idx in range(len(snake_case_ ) ):
_lowerCAmelCase =torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=snake_case_ )
_lowerCAmelCase =resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(snake_case_ )
else:
_lowerCAmelCase =logits.argmax(dim=1 )
_lowerCAmelCase =[semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 720
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'sequence-classification'
def __init__( self , __A ) -> List[Any]:
if type(__A ) == dict:
_lowerCAmelCase =Namespace(**__A )
_lowerCAmelCase =glue_output_modes[hparams.task]
_lowerCAmelCase =glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def UpperCamelCase__ ( self , **__A ) -> Any:
return self.model(**__A )
def UpperCamelCase__ ( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase =outputs[0]
_lowerCAmelCase =self.trainer.lr_schedulers[0]['scheduler']
_lowerCAmelCase ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.hparams
_lowerCAmelCase =processors[args.task]()
_lowerCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
_lowerCAmelCase =self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , __A )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_lowerCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_lowerCAmelCase =convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , __A )
torch.save(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A = False ) -> DataLoader:
_lowerCAmelCase ='dev' if mode == 'test' else mode
_lowerCAmelCase =self._feature_file(__A )
logger.info('Loading features from cached file %s' , __A )
_lowerCAmelCase =torch.load(__A )
_lowerCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def UpperCamelCase__ ( self , __A , __A ) -> List[str]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase , _lowerCAmelCase =outputs[:2]
_lowerCAmelCase =logits.detach().cpu().numpy()
_lowerCAmelCase =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self , __A ) -> tuple:
_lowerCAmelCase =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_lowerCAmelCase =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =np.squeeze(__A )
_lowerCAmelCase =np.concatenate([x['target'] for x in outputs] , axis=0 )
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_lowerCAmelCase =dict(results.items() )
_lowerCAmelCase =results
return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( __A , __A ) -> Any:
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'--max_seq_length' , default=128 , type=__A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=__A , required=__A , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=__A , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =argparse.ArgumentParser()
add_generic_args(a__ , os.getcwd() )
_lowerCAmelCase =GLUETransformer.add_model_specific_args(a__ , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_lowerCAmelCase =os.path.join(
'./results' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_lowerCAmelCase =GLUETransformer(a__ )
_lowerCAmelCase =generic_train(a__ , a__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_lowerCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=a__ ) )
_lowerCAmelCase =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(a__ )
if __name__ == "__main__":
main()
| 58
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( lowercase__):
"""simple docstring"""
def __init__( self , *__A , **__A ) -> Dict:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 721
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A ) -> None:
_lowerCAmelCase =num_of_nodes
_lowerCAmelCase =[]
_lowerCAmelCase ={}
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def UpperCamelCase__ ( self , __A ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCamelCase__ ( self , __A ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowerCAmelCase =self.find_component(__A )
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
if component_size[u_node] <= component_size[v_node]:
_lowerCAmelCase =v_node
component_size[v_node] += component_size[u_node]
self.set_component(__A )
elif component_size[u_node] >= component_size[v_node]:
_lowerCAmelCase =self.find_component(__A )
component_size[u_node] += component_size[v_node]
self.set_component(__A )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =[]
_lowerCAmelCase =0
_lowerCAmelCase =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowerCAmelCase =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowerCAmelCase =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(__A , __A ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__A , __A , __A )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
_lowerCAmelCase =[-1] * self.m_num_of_nodes
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
print(F'''Vertex\tShortest Distance from vertex {src}''' )
for i, d in enumerate(_lowerCAmelCase ):
print(F'''{i}\t\t{d}''' )
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
for j in range(_lowerCAmelCase ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =(graph[j][k] for k in ['src', 'dst', 'weight'])
if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
return True
return False
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =[float('inf' )] * vertex_count
_lowerCAmelCase =0.0
for _ in range(vertex_count - 1 ):
for j in range(_lowerCAmelCase ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =(graph[j][k] for k in ['src', 'dst', 'weight'])
if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
_lowerCAmelCase =distance[u] + w
_lowerCAmelCase =check_negative_cycle(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if negative_cycle_exists:
raise Exception('Negative cycle found' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ = int(input('''Enter number of vertices: ''').strip())
lowercase_ = int(input('''Enter number of edges: ''').strip())
lowercase_ = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
lowercase_ , lowercase_ , lowercase_ = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
lowercase_ = {'''src''': src, '''dst''': dest, '''weight''': weight}
lowercase_ = int(input('''\nEnter shortest path source:''').strip())
lowercase_ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 700
|
'''simple docstring'''
from PIL import Image
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
def brightness(a__ ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(a__ )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ , a__ = None , a__ = None , a__ = False , ):
'''simple docstring'''
_lowerCAmelCase =cipher_alphabet or [chr(lowerCamelCase_ ) for i in range(9_7 , 1_2_3 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
_lowerCAmelCase ={
'''a''': 0.08_497,
'''b''': 0.01_492,
'''c''': 0.02_202,
'''d''': 0.04_253,
'''e''': 0.11_162,
'''f''': 0.02_228,
'''g''': 0.02_015,
'''h''': 0.06_094,
'''i''': 0.07_546,
'''j''': 0.00_153,
'''k''': 0.01_292,
'''l''': 0.04_025,
'''m''': 0.02_406,
'''n''': 0.06_749,
'''o''': 0.07_507,
'''p''': 0.01_929,
'''q''': 0.00_095,
'''r''': 0.07_587,
'''s''': 0.06_327,
'''t''': 0.09_356,
'''u''': 0.02_758,
'''v''': 0.00_978,
'''w''': 0.02_560,
'''x''': 0.00_150,
'''y''': 0.01_994,
'''z''': 0.00_077,
}
else:
# Custom frequencies dictionary
_lowerCAmelCase =frequencies_dict
if not case_sensitive:
_lowerCAmelCase =ciphertext.lower()
# Chi squared statistic values
_lowerCAmelCase ={}
# cycle through all of the shifts
for shift in range(len(lowerCamelCase_ ) ):
_lowerCAmelCase =''''''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
_lowerCAmelCase =(alphabet_letters.index(letter.lower() ) - shift) % len(
lowerCamelCase_ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
_lowerCAmelCase =0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
_lowerCAmelCase =letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
_lowerCAmelCase =decrypted_with_shift.lower().count(lowerCamelCase_ )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
_lowerCAmelCase =frequencies[letter] * occurrences
# Complete the chi squared statistic formula
_lowerCAmelCase =((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
_lowerCAmelCase =decrypted_with_shift.count(lowerCamelCase_ )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
_lowerCAmelCase =frequencies[letter] * occurrences
# Complete the chi squared statistic formula
_lowerCAmelCase =((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
_lowerCAmelCase =(
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(a__ ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
_lowerCAmelCase =min(
lowerCamelCase_ , key=lowerCamelCase_ , )
# Get all the data from the most likely cipher (key, decoded message)
(
_lowerCAmelCase
) =chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
| 701
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A , repo_id='test-config' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__A , repo_id='valid_org/test-config-org' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> List[str]:
CustomConfig.register_for_auto_class()
_lowerCAmelCase =CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
_lowerCAmelCase =AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCAmelCase =c.n_embd + 1 # int
_lowerCAmelCase =c.resid_pdrop + 1.0 # float
_lowerCAmelCase =not c.scale_attn_weights # bool
_lowerCAmelCase =c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__A , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__A , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__A , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__A , c.summary_type , 'mismatch for key: summary_type' )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =PretrainedConfig()
_lowerCAmelCase =[key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
__A , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_lowerCAmelCase =[key for key, value in config_common_kwargs.items() if value == getattr(__A , __A )]
if len(__A ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(__A )}.''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase =mock.Mock()
_lowerCAmelCase =500
_lowerCAmelCase ={}
_lowerCAmelCase =HTTPError
_lowerCAmelCase ={}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__A ) as mock_head:
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =AutoConfig.from_pretrained('bert-base-cased' )
_lowerCAmelCase =['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__A )
_lowerCAmelCase =2
json.dump(configuration.to_dict() , open(os.path.join(__A , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase =['config.42.0.0.json']
_lowerCAmelCase =768
configuration.save_pretrained(__A )
shutil.move(os.path.join(__A , 'config.4.0.0.json' ) , os.path.join(__A , 'config.42.0.0.json' ) )
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase ='hf-internal-testing/test-two-configs'
import transformers as new_transformers
_lowerCAmelCase ='v4.0.0'
_lowerCAmelCase , _lowerCAmelCase =new_transformers.models.auto.AutoConfig.from_pretrained(
__A , return_unused_kwargs=__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(__A , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_lowerCAmelCase ='v3.0.0'
_lowerCAmelCase =old_transformers.models.auto.AutoConfig.from_pretrained(__A )
self.assertEqual(old_configuration.hidden_size , 768 )
| 58
| 0
|
'''simple docstring'''
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self , __A , __A , __A ) -> int:
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for a, b in zip(_lowercase , _lowercase ):
self.assertAlmostEqual(_lowercase , _lowercase , delta=_lowercase )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_lowercase ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =None
ops.enable_eager_execution_internal()
_lowerCAmelCase =tf.config.list_physical_devices('CPU' )
if len(_lowercase ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
_lowerCAmelCase =tf.config.list_logical_devices(device_type='CPU' )
_lowerCAmelCase =tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
_lowerCAmelCase =GradientAccumulator()
_lowerCAmelCase =tf.Variable([4.0, 3.0] )
_lowerCAmelCase , _lowerCAmelCase =create_optimizer(5E-5 , 10 , 5 )
_lowerCAmelCase =tf.Variable([0.0, 0.0] , trainable=_lowercase )
def accumulate_on_replica(__A ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(__A , __A ):
with strategy.scope():
_lowerCAmelCase =strategy.experimental_local_results(_lowercase )
local_variables[0].assign(_lowercase )
local_variables[1].assign(_lowercase )
strategy.run(_lowercase , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_lowercase )
def _check_local_values(__A , __A ):
_lowerCAmelCase =strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _lowercase , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _lowercase , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 702
|
'''simple docstring'''
from __future__ import annotations
lowercase_ = 10
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =1
_lowerCAmelCase =max(a__ )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase =[[] for _ in range(a__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase =int((i / placement) % RADIX )
buckets[tmp].append(a__ )
# put each buckets' contents into list_of_ints
_lowerCAmelCase =0
for b in range(a__ ):
for i in buckets[b]:
_lowerCAmelCase =i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A=14 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.02 , __A=3 , __A=4 , __A=None , ) -> Tuple:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_token_type_ids
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_labels
_lowerCAmelCase =use_mc_token_ids
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =type_sequence_label_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =num_labels
_lowerCAmelCase =num_choices
_lowerCAmelCase =scope
_lowerCAmelCase =self.vocab_size - 1
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase =None
if self.use_token_type_ids:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase =None
if self.use_mc_token_ids:
_lowerCAmelCase =ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
_lowerCAmelCase =None
_lowerCAmelCase =None
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase =self.get_config()
_lowerCAmelCase =ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCamelCase__ ( self ) -> Optional[int]:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , *__A ) -> Union[str, Any]:
_lowerCAmelCase =CTRLModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
model(lowercase_ , token_type_ids=lowercase_ , head_mask=lowercase_ )
model(lowercase_ , token_type_ids=lowercase_ )
_lowerCAmelCase =model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , *__A ) -> Tuple:
_lowerCAmelCase =CTRLLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
_lowerCAmelCase =model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =self.prepare_config_and_inputs()
(
_lowerCAmelCase
) =config_and_inputs
_lowerCAmelCase ={"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def UpperCamelCase__ ( self , __A , __A , __A , __A , *__A ) -> Union[str, Any]:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =CTRLForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
_lowerCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase =model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class SCREAMING_SNAKE_CASE ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
lowercase : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowercase : List[str] = (CTRLLMHeadModel,) if is_torch_available() else ()
lowercase : Optional[int] = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase : str = True
lowercase : Any = False
lowercase : List[str] = False
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A ) -> Optional[Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =CTRLModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=lowercase_ , n_embd=37 )
def UpperCamelCase__ ( self ) -> Optional[int]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowercase_ )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase_ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase__ ( self ) -> Tuple:
pass
@slow
def UpperCamelCase__ ( self ) -> int:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =CTRLModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
def UpperCamelCase__ ( self ) -> List[str]:
pass
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Dict:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =CTRLLMHeadModel.from_pretrained('ctrl' )
model.to(lowercase_ )
_lowerCAmelCase =torch.tensor(
[[1_1859, 0, 1611, 8]] , dtype=torch.long , device=lowercase_ ) # Legal the president is
_lowerCAmelCase =[
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
_lowerCAmelCase =model.generate(lowercase_ , do_sample=lowercase_ )
self.assertListEqual(output_ids[0].tolist() , lowercase_ )
| 703
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58
| 0
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
lowercase_ = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
lowercase_ = {"facebook/blenderbot-3B": 128}
class SCREAMING_SNAKE_CASE ( _UpperCAmelCase):
"""simple docstring"""
lowercase : Optional[Any] = VOCAB_FILES_NAMES
lowercase : int = PRETRAINED_VOCAB_FILES_MAP
lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : int = ['''input_ids''', '''attention_mask''']
lowercase : str = BlenderbotTokenizer
def __init__( self , __A=None , __A=None , __A=None , __A="replace" , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=False , __A=True , **__A , ) -> int:
super().__init__(
A_ , A_ , tokenizer_file=A_ , errors=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , cls_token=A_ , unk_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , trim_offsets=A_ , **A_ , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , A_ ) != add_prefix_space:
_lowerCAmelCase =getattr(A_ , pre_tok_state.pop('type' ) )
_lowerCAmelCase =add_prefix_space
_lowerCAmelCase =pre_tok_class(**A_ )
_lowerCAmelCase =add_prefix_space
_lowerCAmelCase ='post_processor'
_lowerCAmelCase =getattr(self.backend_tokenizer , A_ , A_ )
if tokenizer_component_instance:
_lowerCAmelCase =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_lowerCAmelCase =tuple(state['sep'] )
if "cls" in state:
_lowerCAmelCase =tuple(state['cls'] )
_lowerCAmelCase =False
if state.get('add_prefix_space' , A_ ) != add_prefix_space:
_lowerCAmelCase =add_prefix_space
_lowerCAmelCase =True
if state.get('trim_offsets' , A_ ) != trim_offsets:
_lowerCAmelCase =trim_offsets
_lowerCAmelCase =True
if changes_to_apply:
_lowerCAmelCase =getattr(A_ , state.pop('type' ) )
_lowerCAmelCase =component_class(**A_ )
setattr(self.backend_tokenizer , A_ , A_ )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCamelCase__ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCamelCase__ ( self , __A ) -> List[Any]:
_lowerCAmelCase =AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else value
_lowerCAmelCase =value
def UpperCamelCase__ ( self , *__A , **__A ) -> BatchEncoding:
_lowerCAmelCase =kwargs.get('is_split_into_words' , A_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A_ , **A_ )
def UpperCamelCase__ ( self , *__A , **__A ) -> BatchEncoding:
_lowerCAmelCase =kwargs.get('is_split_into_words' , A_ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A_ , **A_ )
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self , __A , __A = None ) -> Any:
return token_ids_a + [self.eos_token_id]
def UpperCamelCase__ ( self , __A ) -> List[int]:
_lowerCAmelCase =[]
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(' ' + text )
else:
# Generated responses should contain them already.
inputs.append(A_ )
_lowerCAmelCase =' '.join(A_ )
_lowerCAmelCase =self.encode(A_ )
if len(A_ ) > self.model_max_length:
_lowerCAmelCase =input_ids[-self.model_max_length :]
logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 704
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =len(a__ ) // 2
# choose the middle 3 elements
_lowerCAmelCase =lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'distilbert'
lowercase : Union[str, Any] = {
'hidden_size': 'dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
}
def __init__( self , __A=3_0522 , __A=512 , __A=False , __A=6 , __A=12 , __A=768 , __A=4 * 768 , __A=0.1 , __A=0.1 , __A="gelu" , __A=0.02 , __A=0.1 , __A=0.2 , __A=0 , **__A , ) -> Tuple:
_lowerCAmelCase =vocab_size
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =sinusoidal_pos_embds
_lowerCAmelCase =n_layers
_lowerCAmelCase =n_heads
_lowerCAmelCase =dim
_lowerCAmelCase =hidden_dim
_lowerCAmelCase =dropout
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =activation
_lowerCAmelCase =initializer_range
_lowerCAmelCase =qa_dropout
_lowerCAmelCase =seq_classif_dropout
super().__init__(**__A , pad_token_id=__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: "batch", 1: "choice", 2: "sequence"}
else:
_lowerCAmelCase ={0: "batch", 1: "sequence"}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 705
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ConvBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A="[UNK]" , __A="[SEP]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __A ) != do_lower_case
or normalizer_state.get('strip_accents' , __A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __A ) != tokenize_chinese_chars
):
_lowerCAmelCase =getattr(__A , normalizer_state.pop('type' ) )
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =strip_accents
_lowerCAmelCase =tokenize_chinese_chars
_lowerCAmelCase =normalizer_class(**__A )
_lowerCAmelCase =do_lower_case
def UpperCamelCase__ ( self , __A , __A=None ) -> int:
_lowerCAmelCase =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
| 58
| 0
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =inspect.getfile(accelerate.test_utils )
_lowerCAmelCase =os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['scripts', 'external_deps', 'test_metrics.py'] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
_lowerCAmelCase =test_metrics
@require_cpu
def UpperCamelCase__ ( self ) -> int:
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def UpperCamelCase__ ( self ) -> List[str]:
debug_launcher(self.test_metrics.main )
@require_single_gpu
def UpperCamelCase__ ( self ) -> Dict:
self.test_metrics.main()
@require_multi_gpu
def UpperCamelCase__ ( self ) -> int:
print(F'''Found {torch.cuda.device_count()} devices.''' )
_lowerCAmelCase =['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__UpperCamelCase , env=os.environ.copy() )
| 706
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = ['image_processor', 'tokenizer']
lowercase : Any = 'CLIPImageProcessor'
lowercase : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __A=None , __A=None , **__A ) -> str:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
_lowerCAmelCase =kwargs.pop('feature_extractor' )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> Optional[int]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
_lowerCAmelCase =self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
return self.image_processor
| 58
| 0
|
'''simple docstring'''
class UpperCamelCase_ : # Public class to implement a graph
"""simple docstring"""
def __init__( self , __A , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase =row
_lowerCAmelCase =col
_lowerCAmelCase =graph
def UpperCamelCase__ ( self , __A , __A , __A ) -> Tuple:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
# Checking all 8 elements surrounding nth element
_lowerCAmelCase =[-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
_lowerCAmelCase =[-1, 0, 1, -1, 1, -1, 0, 1]
_lowerCAmelCase =True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __A ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , __A )
def UpperCamelCase__ ( self ) -> Optional[int]: # And finally, count all islands.
_lowerCAmelCase =[[False for j in range(self.COL )] for i in range(self.ROW )]
_lowerCAmelCase =0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(__A , __A , __A )
count += 1
return count
| 707
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str:
super().__init__()
_lowerCAmelCase =nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
_lowerCAmelCase =nn.Embedding(__A , __A )
_lowerCAmelCase =False
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
_lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Any:
_lowerCAmelCase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase =torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowerCAmelCase =self.position_encoding(__A )
_lowerCAmelCase =self.continuous_inputs_projection(__A )
inputs += position_encodings
_lowerCAmelCase =self.dropout(__A )
# decoder: No padding present.
_lowerCAmelCase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase =lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
_lowerCAmelCase =self.decoder_norm(__A )
_lowerCAmelCase =self.post_dropout(__A )
_lowerCAmelCase =self.spec_out(__A )
return spec_out
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any:
_lowerCAmelCase =self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
_lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase =self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase =self.layer[-1](__A , __A )
return (hidden_states,)
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]:
# pre_self_attention_layer_norm
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.FiLMLayer(__A , __A )
# Self-attention block
_lowerCAmelCase =self.attention(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]:
super().__init__()
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple:
_lowerCAmelCase =self.layer_norm(__A )
_lowerCAmelCase =self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return layer_output
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.film(__A , __A )
_lowerCAmelCase =self.DenseReluDense(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(__A )
_lowerCAmelCase =NewGELUActivation()
def UpperCamelCase__ ( self , __A ) -> List[Any]:
_lowerCAmelCase =self.act(self.wi_a(__A ) )
_lowerCAmelCase =self.wi_a(__A )
_lowerCAmelCase =hidden_gelu * hidden_linear
_lowerCAmelCase =self.dropout(__A )
_lowerCAmelCase =self.wo(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A=1E-6 ) -> int:
super().__init__()
_lowerCAmelCase =nn.Parameter(torch.ones(__A ) )
_lowerCAmelCase =eps
def UpperCamelCase__ ( self , __A ) -> Dict:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_lowerCAmelCase =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__A )
_lowerCAmelCase =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase =hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def UpperCamelCase__ ( self , __A ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(__A , 3.0 )) ))
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , out_features * 2 , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.scale_bias(__A )
_lowerCAmelCase , _lowerCAmelCase =torch.chunk(__A , 2 , -1 )
_lowerCAmelCase =x * (1 + scale) + shift
return x
| 58
| 0
|
'''simple docstring'''
import string
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =''
for i in sequence:
_lowerCAmelCase =ord(lowerCAmelCase__ )
if 6_5 <= extract <= 9_0:
output += chr(1_5_5 - extract )
elif 9_7 <= extract <= 1_2_2:
output += chr(2_1_9 - extract )
else:
output += i
return output
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =string.ascii_letters
_lowerCAmelCase =string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(lowerCAmelCase__ )] if c in letters else c for c in sequence )
def UpperCamelCase__ ( ):
'''simple docstring'''
from timeit import timeit
print('Running performance benchmarks...' )
_lowerCAmelCase ='from string import printable ; from __main__ import atbash, atbash_slow'
print(F'''> atbash_slow(): {timeit('atbash_slow(printable)' , setup=lowerCAmelCase__ )} seconds''' )
print(F'''> atbash(): {timeit('atbash(printable)' , setup=lowerCAmelCase__ )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'{example} encrypted in atbash: {atbash(example)}')
benchmark()
| 708
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return TrainCommand(a__ )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( __A ) -> Tuple:
_lowerCAmelCase =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__A , required=__A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__A , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__A , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__A , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__A , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=__A , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=__A , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__A , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__A , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__A , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__A , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__A , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self , __A ) -> List[str]:
_lowerCAmelCase =logging.get_logger('transformers-cli/training' )
_lowerCAmelCase ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__A )
_lowerCAmelCase =args.output
_lowerCAmelCase =args.column_label
_lowerCAmelCase =args.column_text
_lowerCAmelCase =args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =args.validation_split
_lowerCAmelCase =args.train_batch_size
_lowerCAmelCase =args.valid_batch_size
_lowerCAmelCase =args.learning_rate
_lowerCAmelCase =args.adam_epsilon
def UpperCamelCase__ ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
raise NotImplementedError
def UpperCamelCase__ ( self ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 58
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_lowerCAmelCase =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =self.dummy_uncond_unet
_lowerCAmelCase =PNDMScheduler()
_lowerCAmelCase =PNDMPipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
pndm.to(lowerCamelCase_ )
pndm.set_progress_bar_config(disable=lowerCamelCase_ )
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pndm(generator=lowerCamelCase_ , num_inference_steps=20 , output_type='numpy' ).images
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pndm(generator=lowerCamelCase_ , num_inference_steps=20 , output_type='numpy' , return_dict=lowerCamelCase_ )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase =np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase ='google/ddpm-cifar10-32'
_lowerCAmelCase =UNetaDModel.from_pretrained(lowerCamelCase_ )
_lowerCAmelCase =PNDMScheduler()
_lowerCAmelCase =PNDMPipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
pndm.to(lowerCamelCase_ )
pndm.set_progress_bar_config(disable=lowerCamelCase_ )
_lowerCAmelCase =torch.manual_seed(0 )
_lowerCAmelCase =pndm(generator=lowerCamelCase_ , output_type='numpy' ).images
_lowerCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowerCAmelCase =np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 709
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , ) -> Optional[Any]:
_lowerCAmelCase =parent
_lowerCAmelCase =13
_lowerCAmelCase =7
_lowerCAmelCase =True
_lowerCAmelCase =True
_lowerCAmelCase =True
_lowerCAmelCase =99
_lowerCAmelCase =32
_lowerCAmelCase =2
_lowerCAmelCase =4
_lowerCAmelCase =37
_lowerCAmelCase ='gelu'
_lowerCAmelCase =0.1
_lowerCAmelCase =0.1
_lowerCAmelCase =512
_lowerCAmelCase =16
_lowerCAmelCase =2
_lowerCAmelCase =0.02
_lowerCAmelCase =3
_lowerCAmelCase =4
_lowerCAmelCase =None
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase =None
_lowerCAmelCase =None
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase =EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ) -> str:
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) =self.prepare_config_and_inputs()
_lowerCAmelCase =True
_lowerCAmelCase =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A ) -> List[str]:
_lowerCAmelCase =TFEsmModel(config=UpperCamelCase__ )
_lowerCAmelCase ={'input_ids': input_ids, 'attention_mask': input_mask}
_lowerCAmelCase =model(UpperCamelCase__ )
_lowerCAmelCase =[input_ids, input_mask]
_lowerCAmelCase =model(UpperCamelCase__ )
_lowerCAmelCase =model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A , __A , ) -> Any:
_lowerCAmelCase =True
_lowerCAmelCase =TFEsmModel(config=UpperCamelCase__ )
_lowerCAmelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
_lowerCAmelCase =model(UpperCamelCase__ )
_lowerCAmelCase =[input_ids, input_mask]
_lowerCAmelCase =model(UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ )
# Also check the case where encoder outputs are not passed
_lowerCAmelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase =TFEsmForMaskedLM(config=UpperCamelCase__ )
_lowerCAmelCase =model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =TFEsmForTokenClassification(config=UpperCamelCase__ )
_lowerCAmelCase ={'input_ids': input_ids, 'attention_mask': input_mask}
_lowerCAmelCase =model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) =config_and_inputs
_lowerCAmelCase ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , unittest.TestCase):
"""simple docstring"""
lowercase : Tuple = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
lowercase : List[str] = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase : Tuple = False
lowercase : Optional[int] = False
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =TFEsmModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def UpperCamelCase__ ( self ) -> Any:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase__ )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )
@slow
def UpperCamelCase__ ( self ) -> int:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =TFEsmModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def UpperCamelCase__ ( self ) -> Tuple:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(UpperCamelCase__ )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_lowerCAmelCase =model.get_bias()
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
for k, v in name.items():
assert isinstance(UpperCamelCase__ , tf.Variable )
else:
_lowerCAmelCase =model.get_output_embeddings()
assert x is None
_lowerCAmelCase =model.get_bias()
assert name is None
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
_lowerCAmelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase =model(UpperCamelCase__ )[0]
_lowerCAmelCase =[1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , UpperCamelCase__ )
# compare the actual values for a slice.
_lowerCAmelCase =tf.constant(
[
[
[8.921_518, -10.589_814, -6.4_671_307],
[-6.3_967_156, -13.911_377, -1.1_211_915],
[-7.781_247, -13.951_557, -3.740_592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
_lowerCAmelCase =tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_lowerCAmelCase =model(UpperCamelCase__ )[0]
# compare the actual values for a slice.
_lowerCAmelCase =tf.constant(
[
[
[0.14_443_092, 0.54_125_327, 0.3_247_739],
[0.30_340_484, 0.00_526_676, 0.31_077_722],
[0.32_278_043, -0.24_987_096, 0.3_414_628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 710
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
lowercase_ = TypeVar('''KT''')
lowercase_ = TypeVar('''VT''')
class SCREAMING_SNAKE_CASE ( Generic[KT, VT]):
"""simple docstring"""
def __init__( self , __A = "root" , __A = None ) -> int:
_lowerCAmelCase =key
_lowerCAmelCase =value
_lowerCAmelCase =[]
def __repr__( self ) -> Union[str, Any]:
return F'''Node({self.key}: {self.value})'''
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
return len(self.forward )
class SCREAMING_SNAKE_CASE ( Generic[KT, VT]):
"""simple docstring"""
def __init__( self , __A = 0.5 , __A = 16 ) -> Tuple:
_lowerCAmelCase =Node[KT, VT]()
_lowerCAmelCase =0
_lowerCAmelCase =p
_lowerCAmelCase =max_level
def __str__( self ) -> List[Any]:
_lowerCAmelCase =list(self )
if len(lowerCAmelCase_ ) == 0:
return F'''SkipList(level={self.level})'''
_lowerCAmelCase =max((len(str(lowerCAmelCase_ ) ) for item in items) , default=4 )
_lowerCAmelCase =max(lowerCAmelCase_ , 4 ) + 4
_lowerCAmelCase =self.head
_lowerCAmelCase =[]
_lowerCAmelCase =node.forward.copy()
lines.append(F'''[{node.key}]'''.ljust(lowerCAmelCase_ , '-' ) + '* ' * len(lowerCAmelCase_ ) )
lines.append(' ' * label_size + '| ' * len(lowerCAmelCase_ ) )
while len(node.forward ) != 0:
_lowerCAmelCase =node.forward[0]
lines.append(
F'''[{node.key}]'''.ljust(lowerCAmelCase_ , '-' )
+ ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
lines.append(' ' * label_size + '| ' * len(lowerCAmelCase_ ) )
_lowerCAmelCase =node.forward
lines.append('None'.ljust(lowerCAmelCase_ ) + '* ' * len(lowerCAmelCase_ ) )
return F'''SkipList(level={self.level})\n''' + "\n".join(lowerCAmelCase_ )
def __iter__( self ) -> Optional[int]:
_lowerCAmelCase =self.head
while len(node.forward ) != 0:
yield node.forward[0].key
_lowerCAmelCase =node.forward[0]
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =1
while random() < self.p and level < self.max_level:
level += 1
return level
def UpperCamelCase__ ( self , __A ) -> List[str]:
_lowerCAmelCase =[]
_lowerCAmelCase =self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
_lowerCAmelCase =node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(lowerCAmelCase_ )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def UpperCamelCase__ ( self , __A ) -> List[str]:
_lowerCAmelCase , _lowerCAmelCase =self._locate_node(lowerCAmelCase_ )
if node is not None:
for i, update_node in enumerate(lowerCAmelCase_ ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
_lowerCAmelCase =node.forward[i]
else:
_lowerCAmelCase =update_node.forward[:i]
def UpperCamelCase__ ( self , __A , __A ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase =self._locate_node(lowerCAmelCase_ )
if node is not None:
_lowerCAmelCase =value
else:
_lowerCAmelCase =self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , lowerCAmelCase_ ):
update_vector.append(self.head )
_lowerCAmelCase =level
_lowerCAmelCase =Node(lowerCAmelCase_ , lowerCAmelCase_ )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(lowerCAmelCase_ )
else:
_lowerCAmelCase =new_node
def UpperCamelCase__ ( self , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase =self._locate_node(lowerCAmelCase_ )
if node is not None:
return node.value
return None
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 3 )
skip_list.insert('Key2' , 1_2 )
skip_list.insert('Key3' , 4_1 )
skip_list.insert('Key4' , -1_9 )
_lowerCAmelCase =skip_list.head
_lowerCAmelCase ={}
while node.level != 0:
_lowerCAmelCase =node.forward[0]
_lowerCAmelCase =node.value
assert len(__lowerCAmelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 1_2
assert all_values["Key3"] == 4_1
assert all_values["Key4"] == -1_9
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 1_0 )
skip_list.insert('Key1' , 1_2 )
skip_list.insert('Key5' , 7 )
skip_list.insert('Key7' , 1_0 )
skip_list.insert('Key10' , 5 )
skip_list.insert('Key7' , 7 )
skip_list.insert('Key5' , 5 )
skip_list.insert('Key10' , 1_0 )
_lowerCAmelCase =skip_list.head
_lowerCAmelCase ={}
while node.level != 0:
_lowerCAmelCase =node.forward[0]
_lowerCAmelCase =node.value
if len(__lowerCAmelCase ) != 4:
print()
assert len(__lowerCAmelCase ) == 4
assert all_values["Key1"] == 1_2
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 1_0
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
assert skip_list.find('Some key' ) is None
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key2' , 2_0 )
assert skip_list.find('Key2' ) == 2_0
skip_list.insert('Some Key' , 1_0 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 1_3 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 1_0
assert skip_list.find('V' ) == 1_3
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 1_2 )
skip_list.insert('V' , 1_3 )
skip_list.insert('X' , 1_4 )
skip_list.insert('Key2' , 1_5 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 1_2 )
skip_list.insert('V' , 1_3 )
skip_list.insert('X' , 1_4 )
skip_list.insert('Key2' , 1_5 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 1_4
assert skip_list.find('Key1' ) == 1_2
assert skip_list.find('Key2' ) == 1_5
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 1_2
assert skip_list.find('Key2' ) == 1_5
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 1_5
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert('Key1' , 1_2 )
skip_list.insert('V' , 1_3 )
skip_list.insert('X' , 1_4_2 )
skip_list.insert('Key2' , 1_5 )
skip_list.delete('X' )
def traverse_keys(a__ ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(__lowerCAmelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def UpperCamelCase__ ( ):
'''simple docstring'''
def is_sorted(a__ ):
return all(next_item >= item for item, next_item in zip(__lowerCAmelCase , lst[1:] ) )
_lowerCAmelCase =SkipList()
for i in range(1_0 ):
skip_list.insert(__lowerCAmelCase , __lowerCAmelCase )
assert is_sorted(list(__lowerCAmelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(__lowerCAmelCase ) )
skip_list.insert(-1_2 , -1_2 )
skip_list.insert(7_7 , 7_7 )
assert is_sorted(list(__lowerCAmelCase ) )
def UpperCamelCase__ ( ):
'''simple docstring'''
for _ in range(1_0_0 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =SkipList()
skip_list.insert(2 , '2' )
skip_list.insert(4 , '4' )
skip_list.insert(6 , '4' )
skip_list.insert(4 , '5' )
skip_list.insert(8 , '4' )
skip_list.insert(9 , '4' )
skip_list.delete(4 )
print(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 711
|
'''simple docstring'''
def UpperCamelCase__ ( a__ = 1_0_0_0 ):
'''simple docstring'''
_lowerCAmelCase =2**power
_lowerCAmelCase =0
while n:
_lowerCAmelCase , _lowerCAmelCase =r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 58
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( _UpperCamelCase):
"""simple docstring"""
lowercase : str = ['pixel_values']
def __init__( self , __A = True , __A = None , __A = PILImageResampling.BICUBIC , __A = True , __A = 1 / 255 , __A = True , __A = None , __A = None , __A = True , **__A , ) -> str:
super().__init__(**_UpperCAmelCase )
_lowerCAmelCase =size if size is not None else {'''height''': 384, '''width''': 384}
_lowerCAmelCase =get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
_lowerCAmelCase =do_resize
_lowerCAmelCase =size
_lowerCAmelCase =resample
_lowerCAmelCase =do_rescale
_lowerCAmelCase =rescale_factor
_lowerCAmelCase =do_normalize
_lowerCAmelCase =image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_lowerCAmelCase =image_std if image_std is not None else OPENAI_CLIP_STD
_lowerCAmelCase =do_convert_rgb
def UpperCamelCase__ ( self , __A , __A , __A = PILImageResampling.BICUBIC , __A = None , **__A , ) -> Dict:
_lowerCAmelCase =get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
_lowerCAmelCase =(size['''height'''], size['''width'''])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def UpperCamelCase__ ( self , __A , __A , __A = None , **__A , ) -> int:
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def UpperCamelCase__ ( self , __A , __A , __A , __A = None , **__A , ) -> str:
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def UpperCamelCase__ ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> List[Any]:
_lowerCAmelCase =do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase =resample if resample is not None else self.resample
_lowerCAmelCase =do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase =rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase =do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase =image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase =image_std if image_std is not None else self.image_std
_lowerCAmelCase =do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_lowerCAmelCase =size if size is not None else self.size
_lowerCAmelCase =get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
_lowerCAmelCase =make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_lowerCAmelCase =[convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
_lowerCAmelCase =[to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
_lowerCAmelCase =[self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
_lowerCAmelCase =[self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
_lowerCAmelCase =[self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
_lowerCAmelCase =[to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
_lowerCAmelCase =BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCAmelCase )
return encoded_outputs
| 712
|
'''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =set()
# To detect a back edge, keep track of vertices currently in the recursion stack
_lowerCAmelCase =set()
return any(
node not in visited and depth_first_search(a__ , a__ , a__ , a__ )
for node in graph )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
visited.add(a__ )
rec_stk.add(a__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a__ , a__ , a__ , a__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58
| 0
|
'''simple docstring'''
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =''
for word_or_phrase in separated:
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
raise Exception('join() accepts only strings to be joined' )
joined += word_or_phrase + separator
return joined.strip(UpperCAmelCase__ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 713
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Tuple = 'blip_2_vision_model'
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-10 , __A=True , **__A , ) -> int:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =patch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =hidden_act
_lowerCAmelCase =qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'blip_2_qformer'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> List[str]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =cross_attention_frequency
_lowerCAmelCase =encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'blip-2'
lowercase : Any = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ) -> int:
super().__init__(**__A )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_lowerCAmelCase ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowerCAmelCase =BlipaVisionConfig(**__A )
_lowerCAmelCase =BlipaQFormerConfig(**__A )
_lowerCAmelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowerCAmelCase =CONFIG_MAPPING[text_model_type](**__A )
_lowerCAmelCase =self.text_config.tie_word_embeddings
_lowerCAmelCase =self.text_config.is_encoder_decoder
_lowerCAmelCase =num_query_tokens
_lowerCAmelCase =self.vision_config.hidden_size
_lowerCAmelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCAmelCase =1.0
_lowerCAmelCase =0.02
@classmethod
def UpperCamelCase__ ( cls , __A , __A , __A , **__A , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =copy.deepcopy(self.__dict__ )
_lowerCAmelCase =self.vision_config.to_dict()
_lowerCAmelCase =self.qformer_config.to_dict()
_lowerCAmelCase =self.text_config.to_dict()
_lowerCAmelCase =self.__class__.model_type
return output
| 58
| 0
|
'''simple docstring'''
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError('The length of profit and weight must be same.' )
if max_weight <= 0:
raise ValueError('max_weight must greater than zero.' )
if any(p < 0 for p in profit ):
raise ValueError('Profit can not be negative.' )
if any(w < 0 for w in weight ):
raise ValueError('Weight can not be negative.' )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
_lowerCAmelCase =[p / w for p, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )]
# Creating a copy of the list and sorting profit/weight in ascending order
_lowerCAmelCase =sorted(UpperCAmelCase__ )
# declaring useful variables
_lowerCAmelCase =len(UpperCAmelCase__ )
_lowerCAmelCase =0
_lowerCAmelCase =0
_lowerCAmelCase =0
# loop till the total weight do not reach max limit e.g. 15 kg and till i<length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
_lowerCAmelCase =sorted_profit_by_weight[length - i - 1]
_lowerCAmelCase =profit_by_weight.index(UpperCAmelCase__ )
_lowerCAmelCase =-1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Adding profit gained for the given weight 1 ===
# weight[index]/weight[index]
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
if __name__ == "__main__":
print(
'''Input profits, weights, and then max_weight (all positive ints) separated by '''
'''spaces.'''
)
lowercase_ = [int(x) for x in input('''Input profits separated by spaces: ''').split()]
lowercase_ = [int(x) for x in input('''Input weights separated by spaces: ''').split()]
lowercase_ = int(input('''Max weight allowed: '''))
# Function Call
calc_profit(profit, weight, max_weight)
| 714
|
'''simple docstring'''
lowercase_ = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowercase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase ='Morse code here!'
print(a__ )
_lowerCAmelCase =encrypt(a__ )
print(a__ )
_lowerCAmelCase =decrypt(a__ )
print(a__ )
if __name__ == "__main__":
main()
| 58
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = """falcon"""
lowercase : Tuple = ["""past_key_values"""]
def __init__( self , __A=6_5024 , __A=4544 , __A=32 , __A=71 , __A=1E-5 , __A=0.02 , __A=True , __A=0.0 , __A=0.0 , __A=None , __A=False , __A=False , __A=True , __A=True , __A=False , __A=11 , __A=11 , **__A , ) -> str:
_lowerCAmelCase =vocab_size
# Backward compatibility with n_embed kwarg
_lowerCAmelCase =kwargs.pop('n_embed' , __A )
_lowerCAmelCase =hidden_size if n_embed is None else n_embed
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =layer_norm_epsilon
_lowerCAmelCase =initializer_range
_lowerCAmelCase =use_cache
_lowerCAmelCase =hidden_dropout
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =bos_token_id
_lowerCAmelCase =eos_token_id
_lowerCAmelCase =num_attention_heads if num_kv_heads is None else num_kv_heads
_lowerCAmelCase =alibi
_lowerCAmelCase =new_decoder_architecture
_lowerCAmelCase =multi_query # Ignored when new_decoder_architecture is True
_lowerCAmelCase =parallel_attn
_lowerCAmelCase =bias
super().__init__(bos_token_id=__A , eos_token_id=__A , **__A )
@property
def UpperCamelCase__ ( self ) -> List[Any]:
return self.hidden_size // self.num_attention_heads
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
return not self.alibi
| 715
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = 'data2vec-text'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 58
| 0
|
'''simple docstring'''
import cva
import numpy as np
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A ) -> Optional[int]:
if k in (0.04, 0.06):
_lowerCAmelCase =k
_lowerCAmelCase =window_size
else:
raise ValueError('invalid k value' )
def __str__( self ) -> Optional[int]:
return str(self.k )
def UpperCamelCase__ ( self , __A ) -> Any:
_lowerCAmelCase =cva.imread(__A , 0 )
_lowerCAmelCase , _lowerCAmelCase =img.shape
_lowerCAmelCase =[]
_lowerCAmelCase =img.copy()
_lowerCAmelCase =cva.cvtColor(__A , cva.COLOR_GRAY2RGB )
_lowerCAmelCase , _lowerCAmelCase =np.gradient(__A )
_lowerCAmelCase =dx**2
_lowerCAmelCase =dy**2
_lowerCAmelCase =dx * dy
_lowerCAmelCase =0.04
_lowerCAmelCase =self.window_size // 2
for y in range(__A , h - offset ):
for x in range(__A , w - offset ):
_lowerCAmelCase =ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowerCAmelCase =iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowerCAmelCase =ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowerCAmelCase =(wxx * wyy) - (wxy**2)
_lowerCAmelCase =wxx + wyy
_lowerCAmelCase =det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowercase_ = HarrisCorner(0.04, 3)
lowercase_ = edge_detect.detect('''path_to_image''')
cva.imwrite('''detect.png''', color_img)
| 716
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = IFPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : int = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase__ ( self ) -> str:
return self._get_dummy_components()
def UpperCamelCase__ ( self , __A , __A=0 ) -> int:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ) -> str:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
# if
_lowerCAmelCase =IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase =IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__A , tokenizer=__A )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase =pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase =None
_lowerCAmelCase =None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase =IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase =IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__A , __A , __A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 58
| 0
|
'''simple docstring'''
lowercase_ = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
number //= 1_0_0_0_0_0
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
lowercase_ = [None] * 1000_0000
lowercase_ = True
lowercase_ = False
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_lowerCAmelCase =chain(next_number(a__ ) )
_lowerCAmelCase =number_chain
while number < 1_0_0_0_0_0_0_0:
_lowerCAmelCase =number_chain
number *= 1_0
return number_chain
def UpperCamelCase__ ( a__ = 1_0_0_0_0_0_0_0 ):
'''simple docstring'''
for i in range(1 , a__ ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(a__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution() = }')
| 717
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =[0]
_lowerCAmelCase =[0]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
_lowerCAmelCase =[60]
_lowerCAmelCase =[10]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =3
_lowerCAmelCase =[1, 2, 3]
_lowerCAmelCase =[3, 2, 1]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 5 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =50
_lowerCAmelCase =[60, 100, 120]
_lowerCAmelCase =[10, 20, 30]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 220 )
if __name__ == "__main__":
unittest.main()
| 58
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = '▁'
lowercase_ = {'vocab_file': 'sentencepiece.bpe.model'}
lowercase_ = {
'vocab_file': {
'facebook/mbart-large-50-one-to-many-mmt': (
'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'
),
}
}
lowercase_ = {
'facebook/mbart-large-50-one-to-many-mmt': 1024,
}
# fmt: off
lowercase_ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']
class SCREAMING_SNAKE_CASE ( UpperCamelCase_):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = ['input_ids', 'attention_mask']
lowercase : Optional[Any] = []
lowercase : Dict = []
def __init__( self , __A , __A=None , __A=None , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A = None , **__A , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__A , tgt_lang=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
_lowerCAmelCase =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCAmelCase ={'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCAmelCase =1
_lowerCAmelCase =len(self.sp_model )
_lowerCAmelCase ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__A )
}
_lowerCAmelCase ={v: k for k, v in self.lang_code_to_id.items()}
_lowerCAmelCase =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCAmelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCAmelCase =src_lang if src_lang is not None else '''en_XX'''
_lowerCAmelCase =self.lang_code_to_id[self._src_lang]
_lowerCAmelCase =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCamelCase__ ( self ) -> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCamelCase__ ( self ) -> str:
return self._src_lang
@src_lang.setter
def UpperCamelCase__ ( self , __A ) -> None:
_lowerCAmelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Dict:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
return state
def __setstate__( self , __A ) -> None:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase ={self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCamelCase__ ( self , __A ) -> List[str]:
return self.sp_model.encode(__A , out_type=__A )
def UpperCamelCase__ ( self , __A ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCAmelCase =self.sp_model.PieceToId(__A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase__ ( self , __A ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCamelCase__ ( self , __A ) -> List[Any]:
_lowerCAmelCase =[]
_lowerCAmelCase =''''''
_lowerCAmelCase =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCAmelCase =True
_lowerCAmelCase =[]
else:
current_sub_tokens.append(__A )
_lowerCAmelCase =False
out_string += self.sp_model.decode(__A )
return out_string.strip()
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , 'wb' ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def UpperCamelCase__ ( self , __A , __A = None , __A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
_lowerCAmelCase =[1] * len(self.prefix_tokens )
_lowerCAmelCase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__A )) + suffix_ones
return prefix_ones + ([0] * len(__A )) + ([0] * len(__A )) + suffix_ones
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase__ ( self , __A , __A , __A , __A , **__A ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
_lowerCAmelCase =src_lang
_lowerCAmelCase =self(__A , add_special_tokens=__A , return_tensors=__A , **__A )
_lowerCAmelCase =self.convert_tokens_to_ids(__A )
_lowerCAmelCase =tgt_lang_id
return inputs
def UpperCamelCase__ ( self , __A , __A = "en_XX" , __A = None , __A = "ro_RO" , **__A , ) -> BatchEncoding:
_lowerCAmelCase =src_lang
_lowerCAmelCase =tgt_lang
return super().prepare_seqaseq_batch(__A , __A , **__A )
def UpperCamelCase__ ( self ) -> Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCamelCase__ ( self ) -> Tuple:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCamelCase__ ( self , __A ) -> None:
_lowerCAmelCase =self.lang_code_to_id[src_lang]
_lowerCAmelCase =[self.cur_lang_code_id]
_lowerCAmelCase =[self.eos_token_id]
def UpperCamelCase__ ( self , __A ) -> None:
_lowerCAmelCase =self.lang_code_to_id[tgt_lang]
_lowerCAmelCase =[self.cur_lang_code_id]
_lowerCAmelCase =[self.eos_token_id]
| 718
|
'''simple docstring'''
lowercase_ = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58
| 0
|
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE ( lowercase_):
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.02 , __A=False , __A=True , __A="None" , __A=3 , __A=4 , __A=None , ) -> List[str]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_token_type_ids
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =type_sequence_label_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =num_labels
_lowerCAmelCase =num_choices
_lowerCAmelCase =relative_attention
_lowerCAmelCase =position_biased_input
_lowerCAmelCase =pos_att_type
_lowerCAmelCase =scope
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowerCAmelCase =None
if self.use_token_type_ids:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase =None
_lowerCAmelCase =None
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCamelCase__ ( self , __A ) -> str:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[str]:
_lowerCAmelCase =DebertaVaModel(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , attention_mask=__A , token_type_ids=__A )[0]
_lowerCAmelCase =model(__A , token_type_ids=__A )[0]
_lowerCAmelCase =model(__A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> str:
_lowerCAmelCase =DebertaVaForMaskedLM(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =DebertaVaForSequenceClassification(__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__A )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[str]:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =DebertaVaForTokenClassification(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> List[Any]:
_lowerCAmelCase =DebertaVaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A , __A ) -> Tuple:
_lowerCAmelCase =DebertaVaForMultipleChoice(config=__A )
model.to(__A )
model.eval()
_lowerCAmelCase =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase =model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) =config_and_inputs
_lowerCAmelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase : Union[str, Any] = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase : Dict = True
lowercase : List[Any] = False
lowercase : Optional[int] = False
lowercase : Dict = False
lowercase : Optional[Any] = False
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =DebertaVaModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__A , hidden_size=37 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__A )
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__A )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__A )
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__A )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__A )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__A )
@slow
def UpperCamelCase__ ( self ) -> Dict:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =DebertaVaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@unittest.skip(reason='Model not available yet' )
def UpperCamelCase__ ( self ) -> int:
pass
@slow
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge' )
_lowerCAmelCase =torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_lowerCAmelCase =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase =model(__A , attention_mask=__A )[0]
# compare the actual values for a slice.
_lowerCAmelCase =torch.tensor(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 719
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase_ = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 58
| 0
|
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
lowercase_ = sys.version_info >= (3, 10)
def UpperCamelCase__ ( a__=None , a__=None ):
'''simple docstring'''
return field(default_factory=lambda: default , metadata=lowerCamelCase__ )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : int
lowercase : float
lowercase : str
lowercase : bool
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : int = 42
lowercase : str = field(default='toto' , metadata={'help': 'help message'})
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : bool = False
lowercase : bool = True
lowercase : Optional[bool] = None
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Tuple = """titi"""
lowercase : Optional[int] = """toto"""
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = """titi"""
lowercase : Dict = """toto"""
lowercase : Dict = 42
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : BasicEnum = "toto"
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =BasicEnum(self.foo )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : MixedTypeEnum = "toto"
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =MixedTypeEnum(self.foo )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : Optional[int] = None
lowercase : Optional[float] = field(default=__lowercase , metadata={'help': 'help message'})
lowercase : Optional[str] = None
lowercase : Optional[List[str]] = list_field(default=[])
lowercase : Optional[List[int]] = list_field(default=[])
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : List[int] = list_field(default=[])
lowercase : List[int] = list_field(default=[1, 2, 3])
lowercase : List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'])
lowercase : List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : List[int] = field()
lowercase : str = field()
lowercase : BasicEnum = field()
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =BasicEnum(self.required_enum )
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : int
lowercase : "BasicEnum" = field()
lowercase : "Optional[bool]" = None
lowercase : "str" = field(default='toto' , metadata={'help': 'help message'})
lowercase : "List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'])
if is_python_no_less_than_3_10:
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : bool = False
lowercase : bool = True
lowercase : bool | None = None
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : int | None = None
lowercase : float | None = field(default=__lowercase , metadata={'help': 'help message'})
lowercase : str | None = None
lowercase : list[str] | None = list_field(default=[])
lowercase : list[int] | None = list_field(default=[])
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self , __A , __A ) -> str:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
_lowerCAmelCase ={k: v for k, v in vars(__A ).items() if k != 'container'}
_lowerCAmelCase ={k: v for k, v in vars(__A ).items() if k != 'container'}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get('choices' , __A ) and yy.get('choices' , __A ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx['type'](__A ) , yy['type'](__A ) )
del xx["type"], yy["type"]
self.assertEqual(__A , __A )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =HfArgumentParser(__A )
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument('--foo' , type=__A , required=__A )
expected.add_argument('--bar' , type=__A , required=__A )
expected.add_argument('--baz' , type=__A , required=__A )
expected.add_argument('--flag' , type=__A , default=__A , const=__A , nargs='?' )
self.argparsersEqual(__A , __A )
_lowerCAmelCase =['--foo', '1', '--baz', 'quux', '--bar', '0.5']
((_lowerCAmelCase ) , ) =parser.parse_args_into_dataclasses(__A , look_for_args_file=__A )
self.assertFalse(example.flag )
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =HfArgumentParser(__A )
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument('--foo' , default=42 , type=__A )
expected.add_argument('--baz' , default='toto' , type=__A , help='help message' )
self.argparsersEqual(__A , __A )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument('--foo' , type=__A , default=__A , const=__A , nargs='?' )
expected.add_argument('--baz' , type=__A , default=__A , const=__A , nargs='?' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('--no_baz' , action='store_false' , default=__A , dest='baz' )
expected.add_argument('--opt' , type=__A , default=__A )
_lowerCAmelCase =[WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__A )
for dataclass_type in dataclass_types:
_lowerCAmelCase =HfArgumentParser(__A )
self.argparsersEqual(__A , __A )
_lowerCAmelCase =parser.parse_args([] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
_lowerCAmelCase =parser.parse_args(['--foo', '--no_baz'] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
_lowerCAmelCase =parser.parse_args(['--foo', '--baz'] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
_lowerCAmelCase =parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
_lowerCAmelCase =parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =HfArgumentParser(__A )
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__A , __A )
_lowerCAmelCase =parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
_lowerCAmelCase =parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
_lowerCAmelCase =parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
_lowerCAmelCase =parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
_lowerCAmelCase =parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
_lowerCAmelCase =parser.parse_args_into_dataclasses(['--foo', '42'] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def UpperCamelCase__ ( self ) -> Tuple:
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : Literal["titi", "toto", 42] = "toto"
_lowerCAmelCase =HfArgumentParser(__A )
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument(
'--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , )
self.argparsersEqual(__A , __A )
_lowerCAmelCase =parser.parse_args([] )
self.assertEqual(args.foo , 'toto' )
_lowerCAmelCase =parser.parse_args(['--foo', 'titi'] )
self.assertEqual(args.foo , 'titi' )
_lowerCAmelCase =parser.parse_args(['--foo', '42'] )
self.assertEqual(args.foo , 42 )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =HfArgumentParser(__A )
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument('--foo_int' , nargs='+' , default=[] , type=__A )
expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=__A )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__A )
expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=__A )
self.argparsersEqual(__A , __A )
_lowerCAmelCase =parser.parse_args([] )
self.assertEqual(
__A , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , )
_lowerCAmelCase =parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() )
self.assertEqual(__A , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) )
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument('--foo' , default=__A , type=__A )
expected.add_argument('--bar' , default=__A , type=__A , help='help message' )
expected.add_argument('--baz' , default=__A , type=__A )
expected.add_argument('--ces' , nargs='+' , default=[] , type=__A )
expected.add_argument('--des' , nargs='+' , default=[] , type=__A )
_lowerCAmelCase =[OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__A )
for dataclass_type in dataclass_types:
_lowerCAmelCase =HfArgumentParser(__A )
self.argparsersEqual(__A , __A )
_lowerCAmelCase =parser.parse_args([] )
self.assertEqual(__A , Namespace(foo=__A , bar=__A , baz=__A , ces=[] , des=[] ) )
_lowerCAmelCase =parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() )
self.assertEqual(__A , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =HfArgumentParser(__A )
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument('--required_list' , nargs='+' , type=__A , required=__A )
expected.add_argument('--required_str' , type=__A , required=__A )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__A , )
self.argparsersEqual(__A , __A )
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =HfArgumentParser(__A )
_lowerCAmelCase =argparse.ArgumentParser()
expected.add_argument('--foo' , type=__A , required=__A )
expected.add_argument(
'--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__A , )
expected.add_argument('--opt' , type=__A , default=__A )
expected.add_argument('--baz' , default='toto' , type=__A , help='help message' )
expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__A )
self.argparsersEqual(__A , __A )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =HfArgumentParser(__A )
_lowerCAmelCase ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
_lowerCAmelCase =parser.parse_dict(__A )[0]
_lowerCAmelCase =BasicExample(**__A )
self.assertEqual(__A , __A )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =HfArgumentParser(__A )
_lowerCAmelCase ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
'extra': 42,
}
self.assertRaises(__A , parser.parse_dict , __A , allow_extra_keys=__A )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =HfArgumentParser(__A )
_lowerCAmelCase ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase =os.path.join(__A , 'temp_json' )
os.mkdir(__A )
with open(temp_local_path + '.json' , 'w+' ) as f:
json.dump(__A , __A )
_lowerCAmelCase =parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0]
_lowerCAmelCase =BasicExample(**__A )
self.assertEqual(__A , __A )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =HfArgumentParser(__A )
_lowerCAmelCase ={
'foo': 12,
'bar': 3.14,
'baz': '42',
'flag': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase =os.path.join(__A , 'temp_yaml' )
os.mkdir(__A )
with open(temp_local_path + '.yaml' , 'w+' ) as f:
yaml.dump(__A , __A )
_lowerCAmelCase =parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0]
_lowerCAmelCase =BasicExample(**__A )
self.assertEqual(__A , __A )
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =HfArgumentParser(__A )
self.assertIsNotNone(__A )
| 720
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'sequence-classification'
def __init__( self , __A ) -> List[Any]:
if type(__A ) == dict:
_lowerCAmelCase =Namespace(**__A )
_lowerCAmelCase =glue_output_modes[hparams.task]
_lowerCAmelCase =glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def UpperCamelCase__ ( self , **__A ) -> Any:
return self.model(**__A )
def UpperCamelCase__ ( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase =outputs[0]
_lowerCAmelCase =self.trainer.lr_schedulers[0]['scheduler']
_lowerCAmelCase ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.hparams
_lowerCAmelCase =processors[args.task]()
_lowerCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
_lowerCAmelCase =self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , __A )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_lowerCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_lowerCAmelCase =convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , __A )
torch.save(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A = False ) -> DataLoader:
_lowerCAmelCase ='dev' if mode == 'test' else mode
_lowerCAmelCase =self._feature_file(__A )
logger.info('Loading features from cached file %s' , __A )
_lowerCAmelCase =torch.load(__A )
_lowerCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def UpperCamelCase__ ( self , __A , __A ) -> List[str]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase , _lowerCAmelCase =outputs[:2]
_lowerCAmelCase =logits.detach().cpu().numpy()
_lowerCAmelCase =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self , __A ) -> tuple:
_lowerCAmelCase =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_lowerCAmelCase =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =np.squeeze(__A )
_lowerCAmelCase =np.concatenate([x['target'] for x in outputs] , axis=0 )
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_lowerCAmelCase =dict(results.items() )
_lowerCAmelCase =results
return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( __A , __A ) -> Any:
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'--max_seq_length' , default=128 , type=__A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=__A , required=__A , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=__A , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =argparse.ArgumentParser()
add_generic_args(a__ , os.getcwd() )
_lowerCAmelCase =GLUETransformer.add_model_specific_args(a__ , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_lowerCAmelCase =os.path.join(
'./results' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_lowerCAmelCase =GLUETransformer(a__ )
_lowerCAmelCase =generic_train(a__ , a__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_lowerCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=a__ ) )
_lowerCAmelCase =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(a__ )
if __name__ == "__main__":
main()
| 58
| 0
|
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE ( snake_case__):
"""simple docstring"""
def __init__( self , __A , __A = None , __A = None , __A = None , __A = False , __A = False , __A = None , **__A , ) -> Optional[Any]:
super().__init__(
_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE , streaming=_SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
_lowerCAmelCase =path_or_paths if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else {self.split: path_or_paths}
_lowerCAmelCase =Text(
cache_dir=_SCREAMING_SNAKE_CASE , data_files=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def UpperCamelCase__ ( self ) -> Any:
# Build iterable dataset
if self.streaming:
_lowerCAmelCase =self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_lowerCAmelCase =None
_lowerCAmelCase =None
_lowerCAmelCase =None
_lowerCAmelCase =None
self.builder.download_and_prepare(
download_config=_SCREAMING_SNAKE_CASE , download_mode=_SCREAMING_SNAKE_CASE , verification_mode=_SCREAMING_SNAKE_CASE , base_path=_SCREAMING_SNAKE_CASE , num_proc=self.num_proc , )
_lowerCAmelCase =self.builder.as_dataset(
split=self.split , verification_mode=_SCREAMING_SNAKE_CASE , in_memory=self.keep_in_memory )
return dataset
| 721
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A ) -> None:
_lowerCAmelCase =num_of_nodes
_lowerCAmelCase =[]
_lowerCAmelCase ={}
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def UpperCamelCase__ ( self , __A ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCamelCase__ ( self , __A ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowerCAmelCase =self.find_component(__A )
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
if component_size[u_node] <= component_size[v_node]:
_lowerCAmelCase =v_node
component_size[v_node] += component_size[u_node]
self.set_component(__A )
elif component_size[u_node] >= component_size[v_node]:
_lowerCAmelCase =self.find_component(__A )
component_size[u_node] += component_size[v_node]
self.set_component(__A )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =[]
_lowerCAmelCase =0
_lowerCAmelCase =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowerCAmelCase =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowerCAmelCase =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(__A , __A ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__A , __A , __A )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
_lowerCAmelCase =[-1] * self.m_num_of_nodes
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
'''simple docstring'''
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A , __A ) -> Tuple:
_lowerCAmelCase =None
_lowerCAmelCase =None
_lowerCAmelCase =graph
self._normalize_graph(__A , __A )
_lowerCAmelCase =len(__A )
_lowerCAmelCase =None
def UpperCamelCase__ ( self , __A , __A ) -> Optional[int]:
if sources is int:
_lowerCAmelCase =[sources]
if sinks is int:
_lowerCAmelCase =[sinks]
if len(__A ) == 0 or len(__A ) == 0:
return
_lowerCAmelCase =sources[0]
_lowerCAmelCase =sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__A ) > 1 or len(__A ) > 1:
_lowerCAmelCase =0
for i in sources:
max_input_flow += sum(self.graph[i] )
_lowerCAmelCase =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_lowerCAmelCase =max_input_flow
_lowerCAmelCase =0
_lowerCAmelCase =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_lowerCAmelCase =max_input_flow
_lowerCAmelCase =size - 1
def UpperCamelCase__ ( self ) -> Optional[Any]:
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def UpperCamelCase__ ( self , __A ) -> Dict:
_lowerCAmelCase =algorithm(self )
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A ) -> Dict:
_lowerCAmelCase =flow_network
_lowerCAmelCase =flow_network.verticesCount
_lowerCAmelCase =flow_network.sourceIndex
_lowerCAmelCase =flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_lowerCAmelCase =flow_network.graph
_lowerCAmelCase =False
def UpperCamelCase__ ( self ) -> List[str]:
if not self.executed:
self._algorithm()
_lowerCAmelCase =True
def UpperCamelCase__ ( self ) -> Tuple:
pass
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __init__( self , __A ) -> List[Any]:
super().__init__(__A )
# use this to save your result
_lowerCAmelCase =-1
def UpperCamelCase__ ( self ) -> Dict:
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __init__( self , __A ) -> List[str]:
super().__init__(__A )
_lowerCAmelCase =[[0] * self.verticies_count for i in range(self.verticies_count )]
_lowerCAmelCase =[0] * self.verticies_count
_lowerCAmelCase =[0] * self.verticies_count
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_lowerCAmelCase =[
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_lowerCAmelCase =0
while i < len(__A ):
_lowerCAmelCase =vertices_list[i]
_lowerCAmelCase =self.heights[vertex_index]
self.process_vertex(__A )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__A ) )
_lowerCAmelCase =0
else:
i += 1
_lowerCAmelCase =sum(self.preflow[self.source_index] )
def UpperCamelCase__ ( self , __A ) -> Union[str, Any]:
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__A , __A )
self.relabel(__A )
def UpperCamelCase__ ( self , __A , __A ) -> List[Any]:
_lowerCAmelCase =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def UpperCamelCase__ ( self , __A ) -> Optional[int]:
_lowerCAmelCase =None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_lowerCAmelCase =self.heights[to_index]
if min_height is not None:
_lowerCAmelCase =min_height + 1
if __name__ == "__main__":
lowercase_ = [0]
lowercase_ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
lowercase_ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
lowercase_ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
lowercase_ = flow_network.find_maximum_flow()
print(F'maximum flow is {maximum_flow}')
| 700
|
'''simple docstring'''
from PIL import Image
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
def brightness(a__ ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(a__ )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
| 58
| 0
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = ['image_processor', 'tokenizer']
lowercase : List[Any] = 'BlipImageProcessor'
lowercase : str = 'AutoTokenizer'
def __init__( self , __A , __A , __A ) -> Optional[Any]:
super().__init__(__A , __A )
# add QFormer tokenizer
_lowerCAmelCase =qformer_tokenizer
def __call__( self , __A = None , __A = None , __A = True , __A = False , __A = None , __A = None , __A = 0 , __A = None , __A = None , __A = False , __A = False , __A = False , __A = False , __A = False , __A = True , __A = None , **__A , ) -> BatchFeature:
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
_lowerCAmelCase =BatchFeature()
if text is not None:
_lowerCAmelCase =self.tokenizer(
text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
encoding.update(__A )
_lowerCAmelCase =self.qformer_tokenizer(
text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
_lowerCAmelCase =qformer_text_encoding.pop('input_ids' )
_lowerCAmelCase =qformer_text_encoding.pop('attention_mask' )
if images is not None:
_lowerCAmelCase =self.image_processor(__A , return_tensors=__A )
encoding.update(__A )
return encoding
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Tuple:
return self.tokenizer.decode(*__A , **__A )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCamelCase__ ( self , __A , **__A ) -> List[str]:
if os.path.isfile(__A ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__A , exist_ok=__A )
_lowerCAmelCase =os.path.join(__A , 'qformer_tokenizer' )
self.qformer_tokenizer.save_pretrained(__A )
return super().save_pretrained(__A , **__A )
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> int:
_lowerCAmelCase =AutoTokenizer.from_pretrained(__A , subfolder='qformer_tokenizer' )
_lowerCAmelCase =cls._get_arguments_from_pretrained(__A , **__A )
args.append(__A )
return cls(*__A )
| 701
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A , repo_id='test-config' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__A , repo_id='valid_org/test-config-org' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> List[str]:
CustomConfig.register_for_auto_class()
_lowerCAmelCase =CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
_lowerCAmelCase =AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCAmelCase =c.n_embd + 1 # int
_lowerCAmelCase =c.resid_pdrop + 1.0 # float
_lowerCAmelCase =not c.scale_attn_weights # bool
_lowerCAmelCase =c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__A , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__A , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__A , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__A , c.summary_type , 'mismatch for key: summary_type' )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =PretrainedConfig()
_lowerCAmelCase =[key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
__A , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_lowerCAmelCase =[key for key, value in config_common_kwargs.items() if value == getattr(__A , __A )]
if len(__A ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(__A )}.''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase =mock.Mock()
_lowerCAmelCase =500
_lowerCAmelCase ={}
_lowerCAmelCase =HTTPError
_lowerCAmelCase ={}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__A ) as mock_head:
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =AutoConfig.from_pretrained('bert-base-cased' )
_lowerCAmelCase =['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__A )
_lowerCAmelCase =2
json.dump(configuration.to_dict() , open(os.path.join(__A , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase =['config.42.0.0.json']
_lowerCAmelCase =768
configuration.save_pretrained(__A )
shutil.move(os.path.join(__A , 'config.4.0.0.json' ) , os.path.join(__A , 'config.42.0.0.json' ) )
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase ='hf-internal-testing/test-two-configs'
import transformers as new_transformers
_lowerCAmelCase ='v4.0.0'
_lowerCAmelCase , _lowerCAmelCase =new_transformers.models.auto.AutoConfig.from_pretrained(
__A , return_unused_kwargs=__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(__A , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_lowerCAmelCase ='v3.0.0'
_lowerCAmelCase =old_transformers.models.auto.AutoConfig.from_pretrained(__A )
self.assertEqual(old_configuration.hidden_size , 768 )
| 58
| 0
|
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
lowercase_ = '''__DUMMY_TRANSFORMERS_USER__'''
lowercase_ = '''Dummy User'''
lowercase_ = '''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'''
lowercase_ = '''https://hub-ci.huggingface.co'''
lowercase_ = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
lowercase_ = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
lowercase_ = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , a__ )
@pytest.fixture
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , a__ )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , a__ )
@pytest.fixture
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , a__ )
@pytest.fixture
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
HfFolder.save_token(a__ )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def UpperCamelCase__ ( ):
'''simple docstring'''
return HfApi(endpoint=a__ )
@pytest.fixture(scope='session' )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =HfFolder.get_token()
HfFolder.save_token(a__ )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(a__ )
@pytest.fixture
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
def _cleanup_repo(a__ ):
hf_api.delete_repo(a__ , token=a__ , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
@contextmanager
def _temporary_repo(a__ ):
try:
yield repo_id
finally:
cleanup_repo(a__ )
return _temporary_repo
@pytest.fixture(scope='session' )
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =F'''repo_txt_data-{int(time.time() * 10E3 )}'''
_lowerCAmelCase =F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(a__ , token=a__ , repo_type='dataset' , private=a__ )
hf_api.upload_file(
token=a__ , path_or_fileobj=str(a__ ) , path_in_repo='data/text_data.txt' , repo_id=a__ , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(a__ , token=a__ , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =F'''repo_zipped_txt_data-{int(time.time() * 10E3 )}'''
_lowerCAmelCase =F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(a__ , token=a__ , repo_type='dataset' , private=a__ )
hf_api.upload_file(
token=a__ , path_or_fileobj=str(a__ ) , path_in_repo='data.zip' , repo_id=a__ , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(a__ , token=a__ , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =F'''repo_zipped_img_data-{int(time.time() * 10E3 )}'''
_lowerCAmelCase =F'''{CI_HUB_USER}/{repo_name}'''
hf_api.create_repo(a__ , token=a__ , repo_type='dataset' , private=a__ )
hf_api.upload_file(
token=a__ , path_or_fileobj=str(a__ ) , path_in_repo='data.zip' , repo_id=a__ , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(a__ , token=a__ , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 702
|
'''simple docstring'''
from __future__ import annotations
lowercase_ = 10
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =1
_lowerCAmelCase =max(a__ )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase =[[] for _ in range(a__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase =int((i / placement) % RADIX )
buckets[tmp].append(a__ )
# put each buckets' contents into list_of_ints
_lowerCAmelCase =0
for b in range(a__ ):
for i in buckets[b]:
_lowerCAmelCase =i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
'''simple docstring'''
import math
from collections.abc import Iterator
from itertools import takewhile
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(a__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =2
while True:
if is_prime(a__ ):
yield num
num += 1
def UpperCamelCase__ ( a__ = 2_0_0_0_0_0_0 ):
'''simple docstring'''
return sum(takewhile(lambda a__ : x < n , prime_generator() ) )
if __name__ == "__main__":
print(F'{solution() = }')
| 703
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58
| 0
|
'''simple docstring'''
from collections import defaultdict
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =1
_lowerCAmelCase =True
for v in tree[start]:
if v not in visited:
ret += dfs(a__ )
if ret % 2 == 0:
cuts.append(a__ )
return ret
def UpperCamelCase__ ( ):
'''simple docstring'''
dfs(1 )
if __name__ == "__main__":
lowercase_ , lowercase_ = 10, 9
lowercase_ = defaultdict(list)
lowercase_ = {}
lowercase_ = []
lowercase_ = 0
lowercase_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 704
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =len(a__ ) // 2
# choose the middle 3 elements
_lowerCAmelCase =lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 705
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ConvBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A="[UNK]" , __A="[SEP]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __A ) != do_lower_case
or normalizer_state.get('strip_accents' , __A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __A ) != tokenize_chinese_chars
):
_lowerCAmelCase =getattr(__A , normalizer_state.pop('type' ) )
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =strip_accents
_lowerCAmelCase =tokenize_chinese_chars
_lowerCAmelCase =normalizer_class(**__A )
_lowerCAmelCase =do_lower_case
def UpperCamelCase__ ( self , __A , __A=None ) -> int:
_lowerCAmelCase =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
| 58
| 0
|
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowercase_ = object()
# For specifying empty leaf dict `{}`
lowercase_ = object()
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(a__ ) - len(a__ ) + 1 ):
_lowerCAmelCase =[x.match(a__ ) for x, y in zip(a__ , ks[i:] )]
if matches and all(a__ ):
return True
return False
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
def replace(a__ , a__ ):
for rule, replacement in rules:
if _match(a__ , a__ ):
return replacement
return val
return replace
def UpperCamelCase__ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , a__ )),
(("transformer", "wte", "embedding"), P('mp' , a__ )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(a__ , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , a__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(a__ , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , a__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =_get_partition_rules()
_lowerCAmelCase =_replacement_rules(a__ )
_lowerCAmelCase ={k: _unmatched for k in flatten_dict(a__ )}
_lowerCAmelCase ={k: replace(a__ , a__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(a__ ) )
| 706
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = ['image_processor', 'tokenizer']
lowercase : Any = 'CLIPImageProcessor'
lowercase : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __A=None , __A=None , **__A ) -> str:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
_lowerCAmelCase =kwargs.pop('feature_extractor' )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> Optional[int]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
_lowerCAmelCase =self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
return self.image_processor
| 58
| 0
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 707
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str:
super().__init__()
_lowerCAmelCase =nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
_lowerCAmelCase =nn.Embedding(__A , __A )
_lowerCAmelCase =False
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
_lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Any:
_lowerCAmelCase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase =torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowerCAmelCase =self.position_encoding(__A )
_lowerCAmelCase =self.continuous_inputs_projection(__A )
inputs += position_encodings
_lowerCAmelCase =self.dropout(__A )
# decoder: No padding present.
_lowerCAmelCase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase =lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
_lowerCAmelCase =self.decoder_norm(__A )
_lowerCAmelCase =self.post_dropout(__A )
_lowerCAmelCase =self.spec_out(__A )
return spec_out
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any:
_lowerCAmelCase =self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
_lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase =self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase =self.layer[-1](__A , __A )
return (hidden_states,)
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]:
# pre_self_attention_layer_norm
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.FiLMLayer(__A , __A )
# Self-attention block
_lowerCAmelCase =self.attention(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]:
super().__init__()
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple:
_lowerCAmelCase =self.layer_norm(__A )
_lowerCAmelCase =self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return layer_output
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.film(__A , __A )
_lowerCAmelCase =self.DenseReluDense(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(__A )
_lowerCAmelCase =NewGELUActivation()
def UpperCamelCase__ ( self , __A ) -> List[Any]:
_lowerCAmelCase =self.act(self.wi_a(__A ) )
_lowerCAmelCase =self.wi_a(__A )
_lowerCAmelCase =hidden_gelu * hidden_linear
_lowerCAmelCase =self.dropout(__A )
_lowerCAmelCase =self.wo(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A=1E-6 ) -> int:
super().__init__()
_lowerCAmelCase =nn.Parameter(torch.ones(__A ) )
_lowerCAmelCase =eps
def UpperCamelCase__ ( self , __A ) -> Dict:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_lowerCAmelCase =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__A )
_lowerCAmelCase =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase =hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def UpperCamelCase__ ( self , __A ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(__A , 3.0 )) ))
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , out_features * 2 , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.scale_bias(__A )
_lowerCAmelCase , _lowerCAmelCase =torch.chunk(__A , 2 , -1 )
_lowerCAmelCase =x * (1 + scale) + shift
return x
| 58
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = 'ibert'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=False , __A="none" , **__A , ) -> Any:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =quant_mode
_lowerCAmelCase =force_dequant
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 708
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return TrainCommand(a__ )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( __A ) -> Tuple:
_lowerCAmelCase =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__A , required=__A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__A , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__A , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__A , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__A , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=__A , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=__A , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__A , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__A , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__A , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__A , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__A , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self , __A ) -> List[str]:
_lowerCAmelCase =logging.get_logger('transformers-cli/training' )
_lowerCAmelCase ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__A )
_lowerCAmelCase =args.output
_lowerCAmelCase =args.column_label
_lowerCAmelCase =args.column_text
_lowerCAmelCase =args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =args.validation_split
_lowerCAmelCase =args.train_batch_size
_lowerCAmelCase =args.valid_batch_size
_lowerCAmelCase =args.learning_rate
_lowerCAmelCase =args.adam_epsilon
def UpperCamelCase__ ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
raise NotImplementedError
def UpperCamelCase__ ( self ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 58
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = 'data2vec-text'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 709
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
| 0
|
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowercase_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
return max(metric_fn(a__ , a__ ) for gt in ground_truths )
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =[line.strip() for line in open(a__ , 'r' ).readlines()]
_lowerCAmelCase =[]
if args.gold_data_mode == "qa":
_lowerCAmelCase =pd.read_csv(a__ , sep='\t' , header=a__ )
for answer_list in data[1]:
_lowerCAmelCase =ast.literal_eval(a__ )
answers.append(a__ )
else:
_lowerCAmelCase =[line.strip() for line in open(a__ , 'r' ).readlines()]
_lowerCAmelCase =[[reference] for reference in references]
_lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0
for prediction, ground_truths in zip(a__ , a__ ):
total += 1
em += metric_max_over_ground_truths(a__ , a__ , a__ )
fa += metric_max_over_ground_truths(a__ , a__ , a__ )
_lowerCAmelCase =100.0 * em / total
_lowerCAmelCase =100.0 * fa / total
logger.info(F'''F1: {fa:.2f}''' )
logger.info(F'''EM: {em:.2f}''' )
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =args.k
_lowerCAmelCase =[line.strip() for line in open(a__ , 'r' ).readlines()]
_lowerCAmelCase =[line.strip() for line in open(a__ , 'r' ).readlines()]
_lowerCAmelCase =_lowerCAmelCase =0
for hypo, reference in zip(a__ , a__ ):
_lowerCAmelCase =set(hypo.split('\t' )[:k] )
_lowerCAmelCase =set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
_lowerCAmelCase =100.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
def strip_title(a__ ):
if title.startswith('"' ):
_lowerCAmelCase =title[1:]
if title.endswith('"' ):
_lowerCAmelCase =title[:-1]
return title
_lowerCAmelCase =rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a__ , return_tensors='pt' , padding=a__ , truncation=a__ , )['input_ids'].to(args.device )
_lowerCAmelCase =rag_model.rag.question_encoder(a__ )
_lowerCAmelCase =question_enc_outputs[0]
_lowerCAmelCase =rag_model.retriever(
a__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
_lowerCAmelCase =rag_model.retriever.index.get_doc_dicts(result.doc_ids )
_lowerCAmelCase =[]
for docs in all_docs:
_lowerCAmelCase =[strip_title(a__ ) for title in docs['title']]
provenance_strings.append('\t'.join(a__ ) )
return provenance_strings
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase =rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a__ , return_tensors='pt' , padding=a__ , truncation=a__ )
_lowerCAmelCase =inputs_dict.input_ids.to(args.device )
_lowerCAmelCase =inputs_dict.attention_mask.to(args.device )
_lowerCAmelCase =rag_model.generate( # rag_model overwrites generate
a__ , attention_mask=a__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
_lowerCAmelCase =rag_model.retriever.generator_tokenizer.batch_decode(a__ , skip_special_tokens=a__ )
if args.print_predictions:
for q, a in zip(a__ , a__ ):
logger.info('Q: {} - A: {}'.format(a__ , a__ ) )
return answers
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=a__ , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=a__ , choices=['exact', 'compressed', 'legacy'] , type=a__ , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=a__ , type=a__ , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=a__ , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=a__ , type=a__ , required=a__ , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=a__ , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=a__ , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=a__ , type=a__ , required=a__ , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=a__ , type=a__ , required=a__ , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=a__ , choices=['qa', 'ans'] , help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) , )
parser.add_argument(
'--predictions_path' , type=a__ , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=a__ , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=a__ , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=a__ , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=5_0 , type=a__ , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase ={}
if args.model_type is None:
_lowerCAmelCase =infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
_lowerCAmelCase =RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
_lowerCAmelCase =args.n_docs
if args.index_name is not None:
_lowerCAmelCase =args.index_name
if args.index_path is not None:
_lowerCAmelCase =args.index_path
else:
_lowerCAmelCase =BartForConditionalGeneration
_lowerCAmelCase =(
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , a__ )
_lowerCAmelCase =get_scores if args.eval_mode == 'e2e' else get_precision_at_k
_lowerCAmelCase =evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(a__ , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(a__ ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
_lowerCAmelCase =RagRetriever.from_pretrained(a__ , **a__ )
_lowerCAmelCase =model_class.from_pretrained(a__ , retriever=a__ , **a__ )
model.retriever.init_retrieval()
else:
_lowerCAmelCase =model_class.from_pretrained(a__ , **a__ )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
_lowerCAmelCase =[]
for line in tqdm(a__ ):
questions.append(line.strip() )
if len(a__ ) == args.eval_batch_size:
_lowerCAmelCase =evaluate_batch_fn(a__ , a__ , a__ )
preds_file.write('\n'.join(a__ ) + '\n' )
preds_file.flush()
_lowerCAmelCase =[]
if len(a__ ) > 0:
_lowerCAmelCase =evaluate_batch_fn(a__ , a__ , a__ )
preds_file.write('\n'.join(a__ ) )
preds_file.flush()
score_fn(a__ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowercase_ = get_args()
main(args)
| 710
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 58
| 0
|
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def UpperCamelCase__ ( a__ , a__ = "cpu" , a__ = None ):
'''simple docstring'''
_lowerCAmelCase =torch.load(a__ , map_location=a__ )
for k, v in tqdm(state_dict.items() ):
if not isinstance(a__ , torch.Tensor ):
raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
_lowerCAmelCase =v.half()
if save_path is None: # overwrite src_path
_lowerCAmelCase =src_path
torch.save(a__ , a__ )
if __name__ == "__main__":
fire.Fire(convert)
| 711
|
'''simple docstring'''
def UpperCamelCase__ ( a__ = 1_0_0_0 ):
'''simple docstring'''
_lowerCAmelCase =2**power
_lowerCAmelCase =0
while n:
_lowerCAmelCase , _lowerCAmelCase =r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 58
| 0
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 712
|
'''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =set()
# To detect a back edge, keep track of vertices currently in the recursion stack
_lowerCAmelCase =set()
return any(
node not in visited and depth_first_search(a__ , a__ , a__ , a__ )
for node in graph )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
visited.add(a__ )
rec_stk.add(a__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a__ , a__ , a__ , a__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 713
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Tuple = 'blip_2_vision_model'
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-10 , __A=True , **__A , ) -> int:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =patch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =hidden_act
_lowerCAmelCase =qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'blip_2_qformer'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> List[str]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =cross_attention_frequency
_lowerCAmelCase =encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'blip-2'
lowercase : Any = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ) -> int:
super().__init__(**__A )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_lowerCAmelCase ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowerCAmelCase =BlipaVisionConfig(**__A )
_lowerCAmelCase =BlipaQFormerConfig(**__A )
_lowerCAmelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowerCAmelCase =CONFIG_MAPPING[text_model_type](**__A )
_lowerCAmelCase =self.text_config.tie_word_embeddings
_lowerCAmelCase =self.text_config.is_encoder_decoder
_lowerCAmelCase =num_query_tokens
_lowerCAmelCase =self.vision_config.hidden_size
_lowerCAmelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCAmelCase =1.0
_lowerCAmelCase =0.02
@classmethod
def UpperCamelCase__ ( cls , __A , __A , __A , **__A , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =copy.deepcopy(self.__dict__ )
_lowerCAmelCase =self.vision_config.to_dict()
_lowerCAmelCase =self.qformer_config.to_dict()
_lowerCAmelCase =self.text_config.to_dict()
_lowerCAmelCase =self.__class__.model_type
return output
| 58
| 0
|
'''simple docstring'''
from math import sqrt
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =0
for i in range(1 , int(sqrt(a__ ) + 1 ) ):
if n % i == 0 and i != sqrt(a__ ):
total += i + n // i
elif i == sqrt(a__ ):
total += i
return total - n
def UpperCamelCase__ ( a__ = 1_0_0_0_0 ):
'''simple docstring'''
_lowerCAmelCase =sum(
i
for i in range(1 , a__ )
if sum_of_divisors(sum_of_divisors(a__ ) ) == i and sum_of_divisors(a__ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 714
|
'''simple docstring'''
lowercase_ = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowercase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase ='Morse code here!'
print(a__ )
_lowerCAmelCase =encrypt(a__ )
print(a__ )
_lowerCAmelCase =decrypt(a__ )
print(a__ )
if __name__ == "__main__":
main()
| 58
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Dict = 'roformer'
def __init__( self , __A=5_0000 , __A=None , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=1536 , __A=2 , __A=0.02 , __A=1E-12 , __A=0 , __A=False , __A=True , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size if embedding_size is None else embedding_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =rotary_value
_lowerCAmelCase =use_cache
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 715
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = 'data2vec-text'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 58
| 0
|
'''simple docstring'''
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCamelCase__ ( a__ = "isbn/0140328726" ):
'''simple docstring'''
_lowerCAmelCase =olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
_lowerCAmelCase =F'''{olid} is not a valid Open Library olid'''
raise ValueError(a__ )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase ={
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
'number_of_pages': 'Number of pages:',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
_lowerCAmelCase ={better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
_lowerCAmelCase =[
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
_lowerCAmelCase =data['First sentence']['value']
for key, value in data.items():
if isinstance(a__ , a__ ):
_lowerCAmelCase =', '.join(a__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
lowercase_ = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(F'\nSearching Open Library for ISBN: {isbn}...\n')
try:
lowercase_ = summarize_book(get_openlibrary_data(F'isbn/{isbn}'))
print('''\n'''.join(F'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'Sorry, there are no results for ISBN: {isbn}.')
| 716
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = IFPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : int = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase__ ( self ) -> str:
return self._get_dummy_components()
def UpperCamelCase__ ( self , __A , __A=0 ) -> int:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ) -> str:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
# if
_lowerCAmelCase =IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase =IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__A , tokenizer=__A )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase =pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase =None
_lowerCAmelCase =None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase =IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase =IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__A , __A , __A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 58
| 0
|
'''simple docstring'''
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
if not isinstance(a__ , a__ ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(a__ , a__ ) or not number >= 1:
raise ValueError(
'starting number must be\n and integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
_lowerCAmelCase =''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(a__ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =[0]
_lowerCAmelCase =[0]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
_lowerCAmelCase =[60]
_lowerCAmelCase =[10]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =3
_lowerCAmelCase =[1, 2, 3]
_lowerCAmelCase =[3, 2, 1]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 5 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =50
_lowerCAmelCase =[60, 100, 120]
_lowerCAmelCase =[10, 20, 30]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 220 )
if __name__ == "__main__":
unittest.main()
| 58
| 0
|
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 0
lowercase : bool = False
lowercase : float = 3.0
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=__A ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def UpperCamelCase__ ( self ) -> int:
# If no defaults are changed, `to_kwargs` returns an empty dict.
_lowerCAmelCase =GradScalerKwargs(init_scale=1024 , growth_factor=2 )
AcceleratorState._reset_state()
_lowerCAmelCase =Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
_lowerCAmelCase =accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 2000 )
self.assertEqual(scaler._enabled , __A )
@require_multi_gpu
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(__A , env=os.environ.copy() )
if __name__ == "__main__":
lowercase_ = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowercase_ = Accelerator(kwargs_handlers=[ddp_scaler])
lowercase_ = torch.nn.Linear(100, 200)
lowercase_ = accelerator.prepare(model)
# Check the values changed in kwargs
lowercase_ = ''''''
lowercase_ = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 718
|
'''simple docstring'''
lowercase_ = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58
| 0
|
'''simple docstring'''
import math
import sys
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
if number != int(a__ ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
_lowerCAmelCase =[-1] * (number + 1)
_lowerCAmelCase =0
for i in range(1 , number + 1 ):
_lowerCAmelCase =sys.maxsize
_lowerCAmelCase =int(math.sqrt(a__ ) )
for j in range(1 , root + 1 ):
_lowerCAmelCase =1 + answers[i - (j**2)]
_lowerCAmelCase =min(a__ , a__ )
_lowerCAmelCase =answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase_ = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 58
| 0
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def UpperCamelCase__ ( a__ , a__ , a__ = 1 / sqrt(2 ) ):
'''simple docstring'''
_lowerCAmelCase =tau * frequency / samplerate
_lowerCAmelCase =sin(a__ )
_lowerCAmelCase =cos(a__ )
_lowerCAmelCase =_sin / (2 * q_factor)
_lowerCAmelCase =(1 - _cos) / 2
_lowerCAmelCase =1 - _cos
_lowerCAmelCase =1 + alpha
_lowerCAmelCase =-2 * _cos
_lowerCAmelCase =1 - alpha
_lowerCAmelCase =IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase__ ( a__ , a__ , a__ = 1 / sqrt(2 ) ):
'''simple docstring'''
_lowerCAmelCase =tau * frequency / samplerate
_lowerCAmelCase =sin(a__ )
_lowerCAmelCase =cos(a__ )
_lowerCAmelCase =_sin / (2 * q_factor)
_lowerCAmelCase =(1 + _cos) / 2
_lowerCAmelCase =-1 - _cos
_lowerCAmelCase =1 + alpha
_lowerCAmelCase =-2 * _cos
_lowerCAmelCase =1 - alpha
_lowerCAmelCase =IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase__ ( a__ , a__ , a__ = 1 / sqrt(2 ) ):
'''simple docstring'''
_lowerCAmelCase =tau * frequency / samplerate
_lowerCAmelCase =sin(a__ )
_lowerCAmelCase =cos(a__ )
_lowerCAmelCase =_sin / (2 * q_factor)
_lowerCAmelCase =_sin / 2
_lowerCAmelCase =0
_lowerCAmelCase =-ba
_lowerCAmelCase =1 + alpha
_lowerCAmelCase =-2 * _cos
_lowerCAmelCase =1 - alpha
_lowerCAmelCase =IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase__ ( a__ , a__ , a__ = 1 / sqrt(2 ) ):
'''simple docstring'''
_lowerCAmelCase =tau * frequency / samplerate
_lowerCAmelCase =sin(a__ )
_lowerCAmelCase =cos(a__ )
_lowerCAmelCase =_sin / (2 * q_factor)
_lowerCAmelCase =1 - alpha
_lowerCAmelCase =-2 * _cos
_lowerCAmelCase =1 + alpha
_lowerCAmelCase =IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def UpperCamelCase__ ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ):
'''simple docstring'''
_lowerCAmelCase =tau * frequency / samplerate
_lowerCAmelCase =sin(a__ )
_lowerCAmelCase =cos(a__ )
_lowerCAmelCase =_sin / (2 * q_factor)
_lowerCAmelCase =1_0 ** (gain_db / 4_0)
_lowerCAmelCase =1 + alpha * big_a
_lowerCAmelCase =-2 * _cos
_lowerCAmelCase =1 - alpha * big_a
_lowerCAmelCase =1 + alpha / big_a
_lowerCAmelCase =-2 * _cos
_lowerCAmelCase =1 - alpha / big_a
_lowerCAmelCase =IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase__ ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ):
'''simple docstring'''
_lowerCAmelCase =tau * frequency / samplerate
_lowerCAmelCase =sin(a__ )
_lowerCAmelCase =cos(a__ )
_lowerCAmelCase =_sin / (2 * q_factor)
_lowerCAmelCase =1_0 ** (gain_db / 4_0)
_lowerCAmelCase =(big_a + 1) - (big_a - 1) * _cos
_lowerCAmelCase =(big_a + 1) + (big_a - 1) * _cos
_lowerCAmelCase =(big_a - 1) - (big_a + 1) * _cos
_lowerCAmelCase =(big_a - 1) + (big_a + 1) * _cos
_lowerCAmelCase =2 * sqrt(a__ ) * alpha
_lowerCAmelCase =big_a * (pmc + aaa)
_lowerCAmelCase =2 * big_a * mpc
_lowerCAmelCase =big_a * (pmc - aaa)
_lowerCAmelCase =ppmc + aaa
_lowerCAmelCase =-2 * pmpc
_lowerCAmelCase =ppmc - aaa
_lowerCAmelCase =IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def UpperCamelCase__ ( a__ , a__ , a__ , a__ = 1 / sqrt(2 ) , ):
'''simple docstring'''
_lowerCAmelCase =tau * frequency / samplerate
_lowerCAmelCase =sin(a__ )
_lowerCAmelCase =cos(a__ )
_lowerCAmelCase =_sin / (2 * q_factor)
_lowerCAmelCase =1_0 ** (gain_db / 4_0)
_lowerCAmelCase =(big_a + 1) - (big_a - 1) * _cos
_lowerCAmelCase =(big_a + 1) + (big_a - 1) * _cos
_lowerCAmelCase =(big_a - 1) - (big_a + 1) * _cos
_lowerCAmelCase =(big_a - 1) + (big_a + 1) * _cos
_lowerCAmelCase =2 * sqrt(a__ ) * alpha
_lowerCAmelCase =big_a * (ppmc + aaa)
_lowerCAmelCase =-2 * big_a * pmpc
_lowerCAmelCase =big_a * (ppmc - aaa)
_lowerCAmelCase =pmc + aaa
_lowerCAmelCase =2 * mpc
_lowerCAmelCase =pmc - aaa
_lowerCAmelCase =IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 720
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'sequence-classification'
def __init__( self , __A ) -> List[Any]:
if type(__A ) == dict:
_lowerCAmelCase =Namespace(**__A )
_lowerCAmelCase =glue_output_modes[hparams.task]
_lowerCAmelCase =glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def UpperCamelCase__ ( self , **__A ) -> Any:
return self.model(**__A )
def UpperCamelCase__ ( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase =outputs[0]
_lowerCAmelCase =self.trainer.lr_schedulers[0]['scheduler']
_lowerCAmelCase ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.hparams
_lowerCAmelCase =processors[args.task]()
_lowerCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
_lowerCAmelCase =self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , __A )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_lowerCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_lowerCAmelCase =convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , __A )
torch.save(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A = False ) -> DataLoader:
_lowerCAmelCase ='dev' if mode == 'test' else mode
_lowerCAmelCase =self._feature_file(__A )
logger.info('Loading features from cached file %s' , __A )
_lowerCAmelCase =torch.load(__A )
_lowerCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def UpperCamelCase__ ( self , __A , __A ) -> List[str]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase , _lowerCAmelCase =outputs[:2]
_lowerCAmelCase =logits.detach().cpu().numpy()
_lowerCAmelCase =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self , __A ) -> tuple:
_lowerCAmelCase =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_lowerCAmelCase =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =np.squeeze(__A )
_lowerCAmelCase =np.concatenate([x['target'] for x in outputs] , axis=0 )
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_lowerCAmelCase =dict(results.items() )
_lowerCAmelCase =results
return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( __A , __A ) -> Any:
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'--max_seq_length' , default=128 , type=__A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=__A , required=__A , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=__A , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =argparse.ArgumentParser()
add_generic_args(a__ , os.getcwd() )
_lowerCAmelCase =GLUETransformer.add_model_specific_args(a__ , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_lowerCAmelCase =os.path.join(
'./results' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_lowerCAmelCase =GLUETransformer(a__ )
_lowerCAmelCase =generic_train(a__ , a__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_lowerCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=a__ ) )
_lowerCAmelCase =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(a__ )
if __name__ == "__main__":
main()
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A=3 , __A=32 , __A=3 , __A=10 , __A=[10, 20, 30, 40] , __A=[1, 1, 2, 1] , __A=True , __A=True , __A="relu" , __A=3 , __A=None , ) -> Union[str, Any]:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =embeddings_size
_lowerCAmelCase =hidden_sizes
_lowerCAmelCase =depths
_lowerCAmelCase =is_training
_lowerCAmelCase =use_labels
_lowerCAmelCase =hidden_act
_lowerCAmelCase =num_labels
_lowerCAmelCase =scope
_lowerCAmelCase =len(__A )
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase =self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ) -> int:
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Tuple:
_lowerCAmelCase =TFResNetModel(config=__A )
_lowerCAmelCase =model(__A )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase =self.num_labels
_lowerCAmelCase =TFResNetForImageClassification(__A )
_lowerCAmelCase =model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =config_and_inputs
_lowerCAmelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : Optional[int] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowercase : List[Any] = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowercase : List[str] = False
lowercase : Optional[Any] = False
lowercase : List[str] = False
lowercase : Union[str, Any] = False
lowercase : List[str] = False
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =TFResNetModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__A , has_text_modality=__A )
def UpperCamelCase__ ( self ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ) -> Optional[Any]:
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def UpperCamelCase__ ( self ) -> str:
pass
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase =model_class(__A )
_lowerCAmelCase =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase =[*signature.parameters.keys()]
_lowerCAmelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , __A )
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
def check_hidden_states_output(__A , __A , __A ):
_lowerCAmelCase =model_class(__A )
_lowerCAmelCase =model(**self._prepare_for_class(__A , __A ) )
_lowerCAmelCase =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCAmelCase =self.model_tester.num_stages
self.assertEqual(len(__A ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_lowerCAmelCase , _lowerCAmelCase =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase =['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowerCAmelCase =layer_type
_lowerCAmelCase =True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase =True
check_hidden_states_output(__A , __A , __A )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def UpperCamelCase__ ( self ) -> str:
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase =TFResNetModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self ) -> Dict:
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase =self.default_image_processor
_lowerCAmelCase =prepare_img()
_lowerCAmelCase =image_processor(images=__A , return_tensors='tf' )
# forward pass
_lowerCAmelCase =model(**__A )
# verify the logits
_lowerCAmelCase =tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __A )
_lowerCAmelCase =tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __A , atol=1E-4 ) )
| 721
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A ) -> None:
_lowerCAmelCase =num_of_nodes
_lowerCAmelCase =[]
_lowerCAmelCase ={}
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def UpperCamelCase__ ( self , __A ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCamelCase__ ( self , __A ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowerCAmelCase =self.find_component(__A )
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
if component_size[u_node] <= component_size[v_node]:
_lowerCAmelCase =v_node
component_size[v_node] += component_size[u_node]
self.set_component(__A )
elif component_size[u_node] >= component_size[v_node]:
_lowerCAmelCase =self.find_component(__A )
component_size[u_node] += component_size[v_node]
self.set_component(__A )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =[]
_lowerCAmelCase =0
_lowerCAmelCase =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowerCAmelCase =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowerCAmelCase =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(__A , __A ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__A , __A , __A )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
_lowerCAmelCase =[-1] * self.m_num_of_nodes
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = 'ClapFeatureExtractor'
lowercase : List[Any] = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self , __A , __A ) -> Union[str, Any]:
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> List[Any]:
_lowerCAmelCase =kwargs.pop('sampling_rate' , __A )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if audios is not None:
_lowerCAmelCase =self.feature_extractor(
__A , sampling_rate=__A , return_tensors=__A , **__A )
if text is not None and audios is not None:
_lowerCAmelCase =audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> int:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 700
|
'''simple docstring'''
from PIL import Image
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
def brightness(a__ ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(a__ )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def UpperCamelCase__ ( a__ = "" ):
'''simple docstring'''
_lowerCAmelCase =url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
_lowerCAmelCase =BeautifulSoup(requests.get(a__ ).text , 'html.parser' )
_lowerCAmelCase =soup.find_all('td' , attrs='titleColumn' )
_lowerCAmelCase =soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(a__ , a__ )
}
def UpperCamelCase__ ( a__ = "IMDb_Top_250_Movies.csv" ):
'''simple docstring'''
_lowerCAmelCase =get_imdb_top_aaa_movies()
with open(a__ , 'w' , newline='' ) as out_file:
_lowerCAmelCase =csv.writer(a__ )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 701
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A , repo_id='test-config' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__A , repo_id='valid_org/test-config-org' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> List[str]:
CustomConfig.register_for_auto_class()
_lowerCAmelCase =CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
_lowerCAmelCase =AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCAmelCase =c.n_embd + 1 # int
_lowerCAmelCase =c.resid_pdrop + 1.0 # float
_lowerCAmelCase =not c.scale_attn_weights # bool
_lowerCAmelCase =c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__A , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__A , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__A , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__A , c.summary_type , 'mismatch for key: summary_type' )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =PretrainedConfig()
_lowerCAmelCase =[key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
__A , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_lowerCAmelCase =[key for key, value in config_common_kwargs.items() if value == getattr(__A , __A )]
if len(__A ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(__A )}.''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase =mock.Mock()
_lowerCAmelCase =500
_lowerCAmelCase ={}
_lowerCAmelCase =HTTPError
_lowerCAmelCase ={}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__A ) as mock_head:
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =AutoConfig.from_pretrained('bert-base-cased' )
_lowerCAmelCase =['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__A )
_lowerCAmelCase =2
json.dump(configuration.to_dict() , open(os.path.join(__A , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase =['config.42.0.0.json']
_lowerCAmelCase =768
configuration.save_pretrained(__A )
shutil.move(os.path.join(__A , 'config.4.0.0.json' ) , os.path.join(__A , 'config.42.0.0.json' ) )
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase ='hf-internal-testing/test-two-configs'
import transformers as new_transformers
_lowerCAmelCase ='v4.0.0'
_lowerCAmelCase , _lowerCAmelCase =new_transformers.models.auto.AutoConfig.from_pretrained(
__A , return_unused_kwargs=__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(__A , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_lowerCAmelCase ='v3.0.0'
_lowerCAmelCase =old_transformers.models.auto.AutoConfig.from_pretrained(__A )
self.assertEqual(old_configuration.hidden_size , 768 )
| 58
| 0
|
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def UpperCamelCase__ ( a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , ):
'''simple docstring'''
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
_lowerCAmelCase =cst_fwd.get(a__ , np.inf )
_lowerCAmelCase =cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
_lowerCAmelCase =new_cost_f
_lowerCAmelCase =v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
_lowerCAmelCase =cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =-1
_lowerCAmelCase =set()
_lowerCAmelCase =set()
_lowerCAmelCase ={source: 0}
_lowerCAmelCase ={destination: 0}
_lowerCAmelCase ={source: None}
_lowerCAmelCase ={destination: None}
_lowerCAmelCase =PriorityQueue()
_lowerCAmelCase =PriorityQueue()
_lowerCAmelCase =np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
_lowerCAmelCase , _lowerCAmelCase =queue_forward.get()
visited_forward.add(a__ )
_lowerCAmelCase , _lowerCAmelCase =queue_backward.get()
visited_backward.add(a__ )
_lowerCAmelCase =pass_and_relaxation(
a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , )
_lowerCAmelCase =pass_and_relaxation(
a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , a__ , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
_lowerCAmelCase =shortest_distance
return shortest_path_distance
lowercase_ = {
'''B''': [['''C''', 1]],
'''C''': [['''D''', 1]],
'''D''': [['''F''', 1]],
'''E''': [['''B''', 1], ['''G''', 2]],
'''F''': [],
'''G''': [['''F''', 1]],
}
lowercase_ = {
'''B''': [['''E''', 1]],
'''C''': [['''B''', 1]],
'''D''': [['''C''', 1]],
'''F''': [['''D''', 1], ['''G''', 1]],
'''E''': [[None, np.inf]],
'''G''': [['''E''', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 702
|
'''simple docstring'''
from __future__ import annotations
lowercase_ = 10
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =1
_lowerCAmelCase =max(a__ )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase =[[] for _ in range(a__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase =int((i / placement) % RADIX )
buckets[tmp].append(a__ )
# put each buckets' contents into list_of_ints
_lowerCAmelCase =0
for b in range(a__ ):
for i in buckets[b]:
_lowerCAmelCase =i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
lowercase_ = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
lowercase_ = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'{len(upper_files)} files contain uppercase characters:')
print('''\n'''.join(upper_files) + '''\n''')
lowercase_ = [file for file in filepaths if ''' ''' in file]
if space_files:
print(F'{len(space_files)} files contain space characters:')
print('''\n'''.join(space_files) + '''\n''')
lowercase_ = [file for file in filepaths if '''-''' in file]
if hyphen_files:
print(F'{len(hyphen_files)} files contain hyphen characters:')
print('''\n'''.join(hyphen_files) + '''\n''')
lowercase_ = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'{len(nodir_files)} files are not in a directory:')
print('''\n'''.join(nodir_files) + '''\n''')
lowercase_ = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 703
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 704
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =len(a__ ) // 2
# choose the middle 3 elements
_lowerCAmelCase =lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
lowercase_ = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowercase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase ='Morse code here!'
print(a__ )
_lowerCAmelCase =encrypt(a__ )
print(a__ )
_lowerCAmelCase =decrypt(a__ )
print(a__ )
if __name__ == "__main__":
main()
| 705
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ConvBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A="[UNK]" , __A="[SEP]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __A ) != do_lower_case
or normalizer_state.get('strip_accents' , __A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __A ) != tokenize_chinese_chars
):
_lowerCAmelCase =getattr(__A , normalizer_state.pop('type' ) )
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =strip_accents
_lowerCAmelCase =tokenize_chinese_chars
_lowerCAmelCase =normalizer_class(**__A )
_lowerCAmelCase =do_lower_case
def UpperCamelCase__ ( self , __A , __A=None ) -> int:
_lowerCAmelCase =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
| 58
| 0
|
'''simple docstring'''
import os
from pathlib import Path
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase ={
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, oder?',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
_lowerCAmelCase ={
'ru-en': ['[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)', '39.20'],
'en-ru': ['[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)', '33.47'],
'en-de': ['[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)', '42.83'],
'de-en': ['[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)', '41.35'],
}
_lowerCAmelCase =F'''{src_lang}-{tgt_lang}'''
_lowerCAmelCase =F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(a__ , exist_ok=a__ )
_lowerCAmelCase =os.path.join(a__ , 'README.md' )
print(F'''Generating {path}''' )
with open(a__ , 'w' , encoding='utf-8' ) as f:
f.write(a__ )
# make sure we are under the root of the project
lowercase_ = Path(__file__).resolve().parent.parent.parent
lowercase_ = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowercase_ , lowercase_ , lowercase_ = model_name.split('''-''')
lowercase_ = model_cards_dir / '''facebook''' / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 706
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = ['image_processor', 'tokenizer']
lowercase : Any = 'CLIPImageProcessor'
lowercase : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __A=None , __A=None , **__A ) -> str:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
_lowerCAmelCase =kwargs.pop('feature_extractor' )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> Optional[int]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
_lowerCAmelCase =self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
return self.image_processor
| 58
| 0
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 707
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2_000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ) -> str:
super().__init__()
_lowerCAmelCase =nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
_lowerCAmelCase =nn.Embedding(__A , __A )
_lowerCAmelCase =False
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
_lowerCAmelCase =DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =nn.Dropout(p=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Any:
_lowerCAmelCase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_lowerCAmelCase =get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_lowerCAmelCase =self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_lowerCAmelCase =decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_lowerCAmelCase =torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_lowerCAmelCase =self.position_encoding(__A )
_lowerCAmelCase =self.continuous_inputs_projection(__A )
inputs += position_encodings
_lowerCAmelCase =self.dropout(__A )
# decoder: No padding present.
_lowerCAmelCase =torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_lowerCAmelCase =[(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_lowerCAmelCase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_lowerCAmelCase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_lowerCAmelCase =lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
_lowerCAmelCase =self.decoder_norm(__A )
_lowerCAmelCase =self.post_dropout(__A )
_lowerCAmelCase =self.spec_out(__A )
return spec_out
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A , __A=1E-6 ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ) -> Any:
_lowerCAmelCase =self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
_lowerCAmelCase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_lowerCAmelCase =self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply Film Conditional Feed Forward layer
_lowerCAmelCase =self.layer[-1](__A , __A )
return (hidden_states,)
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaLayerNorm(__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> List[Any]:
# pre_self_attention_layer_norm
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.FiLMLayer(__A , __A )
# Self-attention block
_lowerCAmelCase =self.attention(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[int]:
super().__init__()
_lowerCAmelCase =Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None , __A=None , ) -> Tuple:
_lowerCAmelCase =self.layer_norm(__A )
_lowerCAmelCase =self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return layer_output
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
_lowerCAmelCase =TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
_lowerCAmelCase =TaLayerNorm(__A , eps=__A )
_lowerCAmelCase =nn.Dropout(__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
_lowerCAmelCase =self.layer_norm(__A )
if conditioning_emb is not None:
_lowerCAmelCase =self.film(__A , __A )
_lowerCAmelCase =self.DenseReluDense(__A )
_lowerCAmelCase =hidden_states + self.dropout(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A , __A ) -> Union[str, Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Linear(__A , __A , bias=__A )
_lowerCAmelCase =nn.Dropout(__A )
_lowerCAmelCase =NewGELUActivation()
def UpperCamelCase__ ( self , __A ) -> List[Any]:
_lowerCAmelCase =self.act(self.wi_a(__A ) )
_lowerCAmelCase =self.wi_a(__A )
_lowerCAmelCase =hidden_gelu * hidden_linear
_lowerCAmelCase =self.dropout(__A )
_lowerCAmelCase =self.wo(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A=1E-6 ) -> int:
super().__init__()
_lowerCAmelCase =nn.Parameter(torch.ones(__A ) )
_lowerCAmelCase =eps
def UpperCamelCase__ ( self , __A ) -> Dict:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
_lowerCAmelCase =hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__A )
_lowerCAmelCase =hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
_lowerCAmelCase =hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def UpperCamelCase__ ( self , __A ) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(__A , 3.0 )) ))
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A , __A ) -> Optional[Any]:
super().__init__()
_lowerCAmelCase =nn.Linear(__A , out_features * 2 , bias=__A )
def UpperCamelCase__ ( self , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =self.scale_bias(__A )
_lowerCAmelCase , _lowerCAmelCase =torch.chunk(__A , 2 , -1 )
_lowerCAmelCase =x * (1 + scale) + shift
return x
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
lowercase_ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'ernie_m'
lowercase : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , __A = 25_0002 , __A = 768 , __A = 12 , __A = 12 , __A = 3072 , __A = "gelu" , __A = 0.1 , __A = 0.1 , __A = 514 , __A = 0.02 , __A = 1 , __A = 1E-05 , __A=None , __A=False , __A=0.0 , **__A , ) -> Optional[Any]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =classifier_dropout
_lowerCAmelCase =is_decoder
_lowerCAmelCase =act_dropout
| 708
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowercase_ = False
lowercase_ = False
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return TrainCommand(a__ )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( __A ) -> Tuple:
_lowerCAmelCase =parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__A , required=__A , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__A , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__A , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__A , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__A , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__A , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=__A , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=__A , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__A , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__A , default=32 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__A , default=64 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__A , default=3E-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__A , default=1E-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__A )
def __init__( self , __A ) -> List[str]:
_lowerCAmelCase =logging.get_logger('transformers-cli/training' )
_lowerCAmelCase ='tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__A )
_lowerCAmelCase =args.output
_lowerCAmelCase =args.column_label
_lowerCAmelCase =args.column_text
_lowerCAmelCase =args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
_lowerCAmelCase =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
_lowerCAmelCase =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
_lowerCAmelCase =args.validation_split
_lowerCAmelCase =args.train_batch_size
_lowerCAmelCase =args.valid_batch_size
_lowerCAmelCase =args.learning_rate
_lowerCAmelCase =args.adam_epsilon
def UpperCamelCase__ ( self ) -> List[str]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
raise NotImplementedError
def UpperCamelCase__ ( self ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 58
| 0
|
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
lowercase : float
lowercase : TreeNode | None = None
lowercase : TreeNode | None = None
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
def is_valid_tree(a__ ) -> bool:
if node is None:
return True
if not isinstance(a__ , a__ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(a__ ):
raise ValueError(
'Each node should be type of TreeNode and data should be float.' )
def is_binary_search_tree_recursive_check(
a__ , a__ , a__ ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , a__ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , a__ )
)
return is_binary_search_tree_recursive_check(a__ , -float('inf' ) , float('inf' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58
| 0
|
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A = "cpu" , __A = "openai/clip-vit-large-patch14" ) -> None:
_lowerCAmelCase =device
_lowerCAmelCase =CLIPTokenizerFast.from_pretrained(__A )
_lowerCAmelCase =[0.48_145_466, 0.4_578_275, 0.40_821_073]
_lowerCAmelCase =[0.26_862_954, 0.26_130_258, 0.27_577_711]
_lowerCAmelCase =torchvision.transforms.Normalize(self.image_mean , self.image_std )
_lowerCAmelCase =torchvision.transforms.Resize(224 )
_lowerCAmelCase =torchvision.transforms.CenterCrop(224 )
def UpperCamelCase__ ( self , __A ) -> List[str]:
_lowerCAmelCase =self.resize(__A )
_lowerCAmelCase =self.center_crop(__A )
_lowerCAmelCase =self.normalize(__A )
return images
def __call__( self , __A=None , __A=None , **__A ) -> List[Any]:
_lowerCAmelCase =self.tokenizer(text=__A , **__A )
_lowerCAmelCase =self.preprocess_img(__A )
_lowerCAmelCase ={key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A=10 , __A=0.01 , __A=None , __A=None , __A=None , __A=None , __A=None , __A=None , __A=False , __A=True , __A="image" , __A=True , __A=False , __A=False , __A=False , ) -> None:
super().__init__()
_lowerCAmelCase =None
_lowerCAmelCase =device if device else get_device()
if vqgan:
_lowerCAmelCase =vqgan
else:
_lowerCAmelCase =load_vqgan(self.device , conf_path=__A , ckpt_path=__A )
self.vqgan.eval()
if clip:
_lowerCAmelCase =clip
else:
_lowerCAmelCase =CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
_lowerCAmelCase =ProcessorGradientFlow(device=self.device )
_lowerCAmelCase =iterations
_lowerCAmelCase =lr
_lowerCAmelCase =log
_lowerCAmelCase =make_grid
_lowerCAmelCase =return_val
_lowerCAmelCase =quantize
_lowerCAmelCase =self.vqgan.decoder.z_shape
def UpperCamelCase__ ( self , __A=None , __A=None , __A=5 , __A=True ) -> Dict:
_lowerCAmelCase =[]
if output_path is None:
_lowerCAmelCase ='./animation.gif'
if input_path is None:
_lowerCAmelCase =self.save_path
_lowerCAmelCase =sorted(glob(input_path + '/*' ) )
if not len(__A ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(__A ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
_lowerCAmelCase =total_duration / len(__A )
_lowerCAmelCase =[frame_duration] * len(__A )
if extend_frames:
_lowerCAmelCase =1.5
_lowerCAmelCase =3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(__A ) )
imageio.mimsave(__A , __A , duration=__A )
print(F'''gif saved to {output_path}''' )
def UpperCamelCase__ ( self , __A=None , __A=None ) -> List[Any]:
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
_lowerCAmelCase =preprocess(Image.open(__A ) , target_image_size=256 ).to(self.device )
_lowerCAmelCase =preprocess_vqgan(__A )
_lowerCAmelCase , *_lowerCAmelCase =self.vqgan.encode(__A )
return z
def UpperCamelCase__ ( self , __A ) -> Any:
_lowerCAmelCase =self.latent.detach().requires_grad_()
_lowerCAmelCase =base_latent + transform_vector
if self.quantize:
_lowerCAmelCase , *_lowerCAmelCase =self.vqgan.quantize(__A )
else:
_lowerCAmelCase =trans_latent
return self.vqgan.decode(__A )
def UpperCamelCase__ ( self , __A , __A , __A=None ) -> Any:
_lowerCAmelCase =self.clip_preprocessor(text=__A , images=__A , return_tensors='pt' , padding=__A )
_lowerCAmelCase =self.clip(**__A )
_lowerCAmelCase =clip_outputs.logits_per_image
if weights is not None:
_lowerCAmelCase =similarity_logits * weights
return similarity_logits.sum()
def UpperCamelCase__ ( self , __A , __A , __A ) -> List[Any]:
_lowerCAmelCase =self._get_clip_similarity(pos_prompts['prompts'] , __A , weights=(1 / pos_prompts['weights']) )
if neg_prompts:
_lowerCAmelCase =self._get_clip_similarity(neg_prompts['prompts'] , __A , weights=neg_prompts['weights'] )
else:
_lowerCAmelCase =torch.tensor([1] , device=self.device )
_lowerCAmelCase =-torch.log(__A ) + torch.log(__A )
return loss
def UpperCamelCase__ ( self , __A , __A , __A ) -> str:
_lowerCAmelCase =torch.randn_like(self.latent , requires_grad=__A , device=self.device )
_lowerCAmelCase =torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_lowerCAmelCase =self._add_vector(__A )
_lowerCAmelCase =loop_post_process(__A )
_lowerCAmelCase =self._get_CLIP_loss(__A , __A , __A )
print('CLIP loss' , __A )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=__A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def UpperCamelCase__ ( self , __A , __A , __A ) -> Tuple:
wandb.init(reinit=__A , project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
_lowerCAmelCase =Image.open(__A )
_lowerCAmelCase =image.resize((256, 256) )
wandb.log('Original Image' , wandb.Image(__A ) )
def UpperCamelCase__ ( self , __A ) -> int:
if not prompts:
return []
_lowerCAmelCase =[]
_lowerCAmelCase =[]
if isinstance(__A , __A ):
_lowerCAmelCase =[prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(__A , (tuple, list) ):
_lowerCAmelCase =prompt[0]
_lowerCAmelCase =float(prompt[1] )
elif ":" in prompt:
_lowerCAmelCase , _lowerCAmelCase =prompt.split(':' )
_lowerCAmelCase =float(__A )
else:
_lowerCAmelCase =prompt
_lowerCAmelCase =1.0
processed_prompts.append(__A )
weights.append(__A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__A , device=self.device ),
}
def UpperCamelCase__ ( self , __A , __A=None , __A=None , __A=True , __A=False , __A=True , __A=True , __A=None , ) -> Dict:
if image_path:
_lowerCAmelCase =self._get_latent(__A )
else:
_lowerCAmelCase =torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__A , __A , __A )
assert pos_prompts, "You must provide at least one positive prompt."
_lowerCAmelCase =self.process_prompts(__A )
_lowerCAmelCase =self.process_prompts(__A )
if save_final and save_path is None:
_lowerCAmelCase =os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(__A ):
os.makedirs(__A )
else:
_lowerCAmelCase =save_path + '_' + get_timestamp()
os.makedirs(__A )
_lowerCAmelCase =save_path
_lowerCAmelCase =self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(__A ) )
_lowerCAmelCase =loop_post_process(__A )
for iter, transformed_img in enumerate(self._optimize_CLIP(__A , __A , __A ) ):
if show_intermediate:
show_pil(__A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({'Image': wandb.Image(__A )} )
if show_final:
show_pil(__A )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''' ) )
| 710
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 58
| 0
|
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =math.sqrt(a__ )
_lowerCAmelCase =1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =np.zeros((kernel_size, kernel_size) )
for i in range(0 , a__ ):
for j in range(0 , a__ ):
_lowerCAmelCase =math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(a__ , a__ )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ , ):
'''simple docstring'''
_lowerCAmelCase =np.zeros(img.shape )
_lowerCAmelCase =get_gauss_kernel(a__ , a__ )
_lowerCAmelCase , _lowerCAmelCase =img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
_lowerCAmelCase =get_slice(a__ , a__ , a__ , a__ )
_lowerCAmelCase =img_s - img_s[kernel_size // 2, kernel_size // 2]
_lowerCAmelCase =vec_gaussian(a__ , a__ )
_lowerCAmelCase =np.multiply(a__ , a__ )
_lowerCAmelCase =np.multiply(a__ , a__ )
_lowerCAmelCase =np.sum(a__ ) / np.sum(a__ )
_lowerCAmelCase =val
return imga
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =args[1] if args[1:] else '../image_data/lena.jpg'
_lowerCAmelCase =float(args[2] ) if args[2:] else 1.0
_lowerCAmelCase =float(args[3] ) if args[3:] else 1.0
if args[4:]:
_lowerCAmelCase =int(args[4] )
_lowerCAmelCase =kernel_size + abs(kernel_size % 2 - 1 )
else:
_lowerCAmelCase =5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
lowercase_ , lowercase_ , lowercase_ , lowercase_ = parse_args(sys.argv)
lowercase_ = cva.imread(filename, 0)
cva.imshow('''input image''', img)
lowercase_ = img / 255
lowercase_ = out.astype('''float32''')
lowercase_ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
lowercase_ = out * 255
lowercase_ = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 711
|
'''simple docstring'''
def UpperCamelCase__ ( a__ = 1_0_0_0 ):
'''simple docstring'''
_lowerCAmelCase =2**power
_lowerCAmelCase =0
while n:
_lowerCAmelCase , _lowerCAmelCase =r + n % 1_0, n // 1_0
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 58
| 0
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A=13 , __A=7 , __A=False , __A=True , __A=False , __A=False , __A=19 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.02 , __A=3 , __A=4 , __A=None , ) -> Any:
_lowerCAmelCase =parent
_lowerCAmelCase =batch_size
_lowerCAmelCase =seq_length
_lowerCAmelCase =is_training
_lowerCAmelCase =use_input_mask
_lowerCAmelCase =use_token_type_ids
_lowerCAmelCase =use_labels
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =type_sequence_label_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =num_labels
_lowerCAmelCase =num_choices
_lowerCAmelCase =scope
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase =None
if self.use_input_mask:
_lowerCAmelCase =random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase =None
_lowerCAmelCase =None
_lowerCAmelCase =None
if self.use_labels:
_lowerCAmelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase =ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__A , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def UpperCamelCase__ ( self , __A , __A , __A , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =EsmForProteinFolding(config=__A ).float()
model.to(__A )
model.eval()
_lowerCAmelCase =model(__A , attention_mask=__A )
_lowerCAmelCase =model(__A )
_lowerCAmelCase =model(__A )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) =config_and_inputs
_lowerCAmelCase ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : Dict = False
lowercase : str = (EsmForProteinFolding,) if is_torch_available() else ()
lowercase : Dict = ()
lowercase : Union[str, Any] = {} if is_torch_available() else {}
lowercase : Dict = False
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =EsmFoldModelTester(self )
_lowerCAmelCase =ConfigTester(self , config_class=__A , hidden_size=37 )
def UpperCamelCase__ ( self ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
@unittest.skip('Does not support attention outputs' )
def UpperCamelCase__ ( self ) -> Tuple:
pass
@unittest.skip
def UpperCamelCase__ ( self ) -> str:
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase__ ( self ) -> Optional[Any]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def UpperCamelCase__ ( self ) -> List[Any]:
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def UpperCamelCase__ ( self ) -> int:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def UpperCamelCase__ ( self ) -> Dict:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def UpperCamelCase__ ( self ) -> Any:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def UpperCamelCase__ ( self ) -> Dict:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def UpperCamelCase__ ( self ) -> List[Any]:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def UpperCamelCase__ ( self ) -> Optional[int]:
pass
@unittest.skip('ESMFold only has one output format.' )
def UpperCamelCase__ ( self ) -> List[str]:
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def UpperCamelCase__ ( self ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support input chunking.' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def UpperCamelCase__ ( self ) -> str:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def UpperCamelCase__ ( self ) -> str:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def UpperCamelCase__ ( self ) -> Any:
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def UpperCamelCase__ ( self ) -> List[str]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def UpperCamelCase__ ( self ) -> int:
pass
@require_torch
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
model.eval()
_lowerCAmelCase =torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_lowerCAmelCase =model(__A )['positions']
_lowerCAmelCase =torch.tensor([2.5_828, 0.7_993, -10.9_334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __A , atol=1E-4 ) )
| 712
|
'''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =set()
# To detect a back edge, keep track of vertices currently in the recursion stack
_lowerCAmelCase =set()
return any(
node not in visited and depth_first_search(a__ , a__ , a__ , a__ )
for node in graph )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
visited.add(a__ )
rec_stk.add(a__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a__ , a__ , a__ , a__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58
| 0
|
'''simple docstring'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =set()
# To detect a back edge, keep track of vertices currently in the recursion stack
_lowerCAmelCase =set()
return any(
node not in visited and depth_first_search(a__ , a__ , a__ , a__ )
for node in graph )
def UpperCamelCase__ ( a__ , a__ , a__ , a__ ):
'''simple docstring'''
visited.add(a__ )
rec_stk.add(a__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(a__ , a__ , a__ , a__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(a__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 713
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Tuple = 'blip_2_vision_model'
def __init__( self , __A=1408 , __A=6144 , __A=39 , __A=16 , __A=224 , __A=14 , __A="gelu" , __A=0.00_001 , __A=0.0 , __A=1E-10 , __A=True , **__A , ) -> int:
super().__init__(**__A )
_lowerCAmelCase =hidden_size
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =patch_size
_lowerCAmelCase =image_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =attention_dropout
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =hidden_act
_lowerCAmelCase =qkv_bias
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'blip_2_qformer'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.02 , __A=1E-12 , __A=0 , __A="absolute" , __A=2 , __A=1408 , **__A , ) -> List[str]:
super().__init__(pad_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =cross_attention_frequency
_lowerCAmelCase =encoder_hidden_size
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__A )
_lowerCAmelCase , _lowerCAmelCase =cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get('model_type' ) == "blip-2":
_lowerCAmelCase =config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Optional[int] = 'blip-2'
lowercase : Any = True
def __init__( self , __A=None , __A=None , __A=None , __A=32 , **__A ) -> int:
super().__init__(**__A )
if vision_config is None:
_lowerCAmelCase ={}
logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
if qformer_config is None:
_lowerCAmelCase ={}
logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
if text_config is None:
_lowerCAmelCase ={}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
_lowerCAmelCase =BlipaVisionConfig(**__A )
_lowerCAmelCase =BlipaQFormerConfig(**__A )
_lowerCAmelCase =text_config['model_type'] if 'model_type' in text_config else 'opt'
_lowerCAmelCase =CONFIG_MAPPING[text_model_type](**__A )
_lowerCAmelCase =self.text_config.tie_word_embeddings
_lowerCAmelCase =self.text_config.is_encoder_decoder
_lowerCAmelCase =num_query_tokens
_lowerCAmelCase =self.vision_config.hidden_size
_lowerCAmelCase =self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowerCAmelCase =1.0
_lowerCAmelCase =0.02
@classmethod
def UpperCamelCase__ ( cls , __A , __A , __A , **__A , ) -> Any:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =copy.deepcopy(self.__dict__ )
_lowerCAmelCase =self.vision_config.to_dict()
_lowerCAmelCase =self.qformer_config.to_dict()
_lowerCAmelCase =self.text_config.to_dict()
_lowerCAmelCase =self.__class__.model_type
return output
| 58
| 0
|
'''simple docstring'''
import math
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
if initial_intensity < 0:
raise ValueError('The value of intensity cannot be negative' )
# handling of negative values of initial intensity
if angle < 0 or angle > 3_6_0:
raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(a__ ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 714
|
'''simple docstring'''
lowercase_ = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowercase_ = {value: key for key, value in MORSE_CODE_DICT.items()}
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase ='Morse code here!'
print(a__ )
_lowerCAmelCase =encrypt(a__ )
print(a__ )
_lowerCAmelCase =decrypt(a__ )
print(a__ )
if __name__ == "__main__":
main()
| 58
| 0
|
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
@staticmethod
def UpperCamelCase__ ( *__A , **__A ) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@require_torch
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase =image_classifier(__A , candidate_labels=['a', 'b', 'c'] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(__A ) , [
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
] , )
_lowerCAmelCase =image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(__A ) , [
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
] , )
@require_tf
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase =image_classifier(__A , candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(__A ) , [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] , )
_lowerCAmelCase =image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(__A ) , [
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
[
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
{'score': 0.333, 'label': ANY(__A )},
],
] , )
@slow
@require_torch
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase =image_classifier(__A , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(__A ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
_lowerCAmelCase =image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(__A ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
# This is an image of 2 cats with remotes and no planes
_lowerCAmelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_lowerCAmelCase =image_classifier(__A , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(__A ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
_lowerCAmelCase =image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(__A ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
| 715
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : List[str] = 'data2vec-text'
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.02 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> List[Any]:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
_lowerCAmelCase =vocab_size
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =hidden_act
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =max_position_embeddings
_lowerCAmelCase =type_vocab_size
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =position_embedding_type
_lowerCAmelCase =use_cache
_lowerCAmelCase =classifier_dropout
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
_lowerCAmelCase ={0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase ={0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 58
| 0
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def UpperCamelCase__ ( a__ , a__ , a__ = None ):
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
_lowerCAmelCase =quote(a__ )
return hfh.hf_hub_url(a__ , a__ , repo_type='dataset' , revision=a__ )
| 716
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : List[Any] = IFPipeline
lowercase : Tuple = TEXT_TO_IMAGE_PARAMS - {'width', 'height', 'latents'}
lowercase : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase : int = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase__ ( self ) -> str:
return self._get_dummy_components()
def UpperCamelCase__ ( self , __A , __A=0 ) -> int:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def UpperCamelCase__ ( self ) -> Tuple:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCamelCase__ ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCamelCase__ ( self ) -> str:
self._test_save_load_local()
def UpperCamelCase__ ( self ) -> Union[str, Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def UpperCamelCase__ ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[Any]:
# if
_lowerCAmelCase =IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
_lowerCAmelCase =IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=__A , tokenizer=__A )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
_lowerCAmelCase , _lowerCAmelCase =pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_lowerCAmelCase =None
_lowerCAmelCase =None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_lowerCAmelCase =IFImgaImgPipeline(**pipe_a.components )
_lowerCAmelCase =IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(__A , __A , __A , __A )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_lowerCAmelCase =IFInpaintingPipeline(**pipe_a.components )
_lowerCAmelCase =IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(__A , __A , __A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> str:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A , __A ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , num_inference_steps=2 , generator=__A , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (64, 64, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(__A , __A )
# pipeline 2
_start_torch_memory_measurement()
_lowerCAmelCase =torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(__A )
_lowerCAmelCase =floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(__A )
_lowerCAmelCase =pipe_a(
prompt_embeds=__A , negative_prompt_embeds=__A , image=__A , mask_image=__A , original_image=__A , generator=__A , num_inference_steps=2 , output_type='np' , )
_lowerCAmelCase =output.images[0]
assert image.shape == (256, 256, 3)
_lowerCAmelCase =torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(__A , __A )
def UpperCamelCase__ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 58
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase):
"""simple docstring"""
lowercase : str = ShapEPipeline
lowercase : int = ['prompt']
lowercase : str = ['prompt']
lowercase : Any = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowercase : Optional[int] = False
@property
def UpperCamelCase__ ( self ) -> str:
return 32
@property
def UpperCamelCase__ ( self ) -> List[Any]:
return 32
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
return 8
@property
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_lowerCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__A )
@property
def UpperCamelCase__ ( self ) -> int:
torch.manual_seed(0 )
_lowerCAmelCase ={
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
_lowerCAmelCase =PriorTransformer(**__A )
return model
@property
def UpperCamelCase__ ( self ) -> int:
torch.manual_seed(0 )
_lowerCAmelCase ={
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase =ShapERenderer(**__A )
return model
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =self.dummy_prior
_lowerCAmelCase =self.dummy_text_encoder
_lowerCAmelCase =self.dummy_tokenizer
_lowerCAmelCase =self.dummy_renderer
_lowerCAmelCase =HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=__A , clip_sample=__A , clip_sample_range=1.0 , )
_lowerCAmelCase ={
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def UpperCamelCase__ ( self , __A , __A=0 ) -> Dict:
if str(__A ).startswith('mps' ):
_lowerCAmelCase =torch.manual_seed(__A )
else:
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(__A )
_lowerCAmelCase ={
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase ='cpu'
_lowerCAmelCase =self.get_dummy_components()
_lowerCAmelCase =self.pipeline_class(**__A )
_lowerCAmelCase =pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCAmelCase =pipe(**self.get_dummy_inputs(__A ) )
_lowerCAmelCase =output.images[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase =np.array(
[
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
0.00_039_216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ) -> List[Any]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def UpperCamelCase__ ( self ) -> int:
_lowerCAmelCase =torch_device == 'cpu'
_lowerCAmelCase =True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__A , relax_max_difference=__A , )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =self.get_dummy_components()
_lowerCAmelCase =self.pipeline_class(**__A )
_lowerCAmelCase =pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCAmelCase =1
_lowerCAmelCase =2
_lowerCAmelCase =self.get_dummy_inputs(__A )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase =batch_size * [inputs[key]]
_lowerCAmelCase =pipe(**__A , num_images_per_prompt=__A )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
_lowerCAmelCase =ShapEPipeline.from_pretrained('openai/shap-e' )
_lowerCAmelCase =pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_lowerCAmelCase =torch.Generator(device=__A ).manual_seed(0 )
_lowerCAmelCase =pipe(
'a shark' , generator=__A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__A , __A )
| 717
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
_lowerCAmelCase =0
_lowerCAmelCase =[0]
_lowerCAmelCase =[0]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
_lowerCAmelCase =[60]
_lowerCAmelCase =[10]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 0 )
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =3
_lowerCAmelCase =[1, 2, 3]
_lowerCAmelCase =[3, 2, 1]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 5 )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase =50
_lowerCAmelCase =[60, 100, 120]
_lowerCAmelCase =[10, 20, 30]
_lowerCAmelCase =len(__A )
self.assertEqual(k.knapsack(__A , __A , __A , __A ) , 220 )
if __name__ == "__main__":
unittest.main()
| 58
| 0
|
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
lowercase_ = parser.parse_args()
lowercase_ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 718
|
'''simple docstring'''
lowercase_ = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 58
| 0
|
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : int = 1
lowercase : bool = True
lowercase : bool = False
lowercase : bool = False
lowercase : bool = False
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for i in range(self.num_layers ):
_lowerCAmelCase =self.in_channels if i == 0 else self.out_channels
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=__A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__A )
_lowerCAmelCase =resnets
_lowerCAmelCase =attentions
if self.add_downsample:
_lowerCAmelCase =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __A , __A , __A , __A=True ) -> Optional[Any]:
_lowerCAmelCase =()
for resnet, attn in zip(self.resnets , self.attentions ):
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
_lowerCAmelCase =attn(__A , __A , deterministic=__A )
output_states += (hidden_states,)
if self.add_downsample:
_lowerCAmelCase =self.downsamplers_a(__A )
output_states += (hidden_states,)
return hidden_states, output_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : bool = True
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =[]
for i in range(self.num_layers ):
_lowerCAmelCase =self.in_channels if i == 0 else self.out_channels
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=__A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =resnets
if self.add_downsample:
_lowerCAmelCase =FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __A , __A , __A=True ) -> Optional[int]:
_lowerCAmelCase =()
for resnet in self.resnets:
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
output_states += (hidden_states,)
if self.add_downsample:
_lowerCAmelCase =self.downsamplers_a(__A )
output_states += (hidden_states,)
return hidden_states, output_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : int
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : int = 1
lowercase : bool = True
lowercase : bool = False
lowercase : bool = False
lowercase : bool = False
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for i in range(self.num_layers ):
_lowerCAmelCase =self.in_channels if (i == self.num_layers - 1) else self.out_channels
_lowerCAmelCase =self.prev_output_channel if i == 0 else self.out_channels
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__A )
_lowerCAmelCase =resnets
_lowerCAmelCase =attentions
if self.add_upsample:
_lowerCAmelCase =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __A , __A , __A , __A , __A=True ) -> Any:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_lowerCAmelCase =res_hidden_states_tuple[-1]
_lowerCAmelCase =res_hidden_states_tuple[:-1]
_lowerCAmelCase =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
_lowerCAmelCase =attn(__A , __A , deterministic=__A )
if self.add_upsample:
_lowerCAmelCase =self.upsamplers_a(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : int
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : bool = True
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =[]
for i in range(self.num_layers ):
_lowerCAmelCase =self.in_channels if (i == self.num_layers - 1) else self.out_channels
_lowerCAmelCase =self.prev_output_channel if i == 0 else self.out_channels
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =resnets
if self.add_upsample:
_lowerCAmelCase =FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __A , __A , __A , __A=True ) -> Any:
for resnet in self.resnets:
# pop res hidden states
_lowerCAmelCase =res_hidden_states_tuple[-1]
_lowerCAmelCase =res_hidden_states_tuple[:-1]
_lowerCAmelCase =jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
if self.add_upsample:
_lowerCAmelCase =self.upsamplers_a(__A )
return hidden_states
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
lowercase : int
lowercase : float = 0.0
lowercase : int = 1
lowercase : int = 1
lowercase : bool = False
lowercase : bool = False
lowercase : jnp.dtype = jnp.floataa
def UpperCamelCase__ ( self ) -> int:
# there is always at least one resnet
_lowerCAmelCase =[
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_lowerCAmelCase =[]
for _ in range(self.num_layers ):
_lowerCAmelCase =FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__A )
_lowerCAmelCase =FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
_lowerCAmelCase =resnets
_lowerCAmelCase =attentions
def __call__( self , __A , __A , __A , __A=True ) -> int:
_lowerCAmelCase =self.resnets[0](__A , __A )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_lowerCAmelCase =attn(__A , __A , deterministic=__A )
_lowerCAmelCase =resnet(__A , __A , deterministic=__A )
return hidden_states
| 719
|
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
lowercase_ = '''sshleifer/mar_enro_6_3_student'''
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[Any]:
super().setUp()
_lowerCAmelCase =cached_path(
'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=__A , )
_lowerCAmelCase =F'''{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'''
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
MarianMTModel.from_pretrained(__A )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Union[str, Any]:
_lowerCAmelCase ={
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
_lowerCAmelCase =(self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
_lowerCAmelCase =F'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
_lowerCAmelCase =['finetune.py'] + bash_script.split() + args
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationModule.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =F'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
_lowerCAmelCase ={
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
_lowerCAmelCase =(
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
_lowerCAmelCase =bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
_lowerCAmelCase =bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
_lowerCAmelCase =bash_script.replace(__A , str(__A ) )
_lowerCAmelCase =self.get_auto_remove_tmp_dir()
_lowerCAmelCase =bash_script.replace('--fp16' , '' )
_lowerCAmelCase =6
_lowerCAmelCase =(
['distillation.py']
+ bash_script.split()
+ [
F'''--output_dir={output_dir}''',
'--gpus=1',
'--learning_rate=1e-3',
F'''--num_train_epochs={epochs}''',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(__A , 'argv' , __A ):
_lowerCAmelCase =argparse.ArgumentParser()
_lowerCAmelCase =pl.Trainer.add_argparse_args(__A )
_lowerCAmelCase =SummarizationDistiller.add_model_specific_args(__A , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
_lowerCAmelCase =distill_main(__A )
# Check metrics
_lowerCAmelCase =load_json(model.metrics_save_path )
_lowerCAmelCase =metrics['val'][0]
_lowerCAmelCase =metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
assert isinstance(last_step_stats[F'''val_avg_{model.val_metric}'''] , __A )
# check lightning ckpt can be loaded and has a reasonable statedict
_lowerCAmelCase =os.listdir(__A )
_lowerCAmelCase =[x for x in contents if x.endswith('.ckpt' )][0]
_lowerCAmelCase =os.path.join(args.output_dir , __A )
_lowerCAmelCase =torch.load(__A , map_location='cpu' )
_lowerCAmelCase ='model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
_lowerCAmelCase ={os.path.basename(__A ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
| 58
| 0
|
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
lowercase_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class SCREAMING_SNAKE_CASE ( nn.Module):
"""simple docstring"""
def __init__( self , __A ) -> Tuple:
super().__init__()
_lowerCAmelCase =torchvision.models.resnetaaa(pretrained=__A )
_lowerCAmelCase =list(model.children() )[:-2]
_lowerCAmelCase =nn.Sequential(*__A )
_lowerCAmelCase =nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def UpperCamelCase__ ( self , __A ) -> List[Any]:
# Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
_lowerCAmelCase =self.pool(self.model(__A ) )
_lowerCAmelCase =torch.flatten(__A , start_dim=2 )
_lowerCAmelCase =out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __init__( self , __A , __A , __A , __A , __A ) -> Optional[Any]:
_lowerCAmelCase =[json.loads(__A ) for l in open(__A )]
_lowerCAmelCase =os.path.dirname(__A )
_lowerCAmelCase =tokenizer
_lowerCAmelCase =labels
_lowerCAmelCase =len(__A )
_lowerCAmelCase =max_seq_length
_lowerCAmelCase =transforms
def __len__( self ) -> Tuple:
return len(self.data )
def __getitem__( self , __A ) -> Dict:
_lowerCAmelCase =torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=__A ) )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =sentence[0], sentence[1:-1], sentence[-1]
_lowerCAmelCase =sentence[: self.max_seq_length]
_lowerCAmelCase =torch.zeros(self.n_classes )
_lowerCAmelCase =1
_lowerCAmelCase =Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
_lowerCAmelCase =self.transforms(__A )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =[len(row['sentence'] ) for row in batch]
_lowerCAmelCase , _lowerCAmelCase =len(a__ ), max(a__ )
_lowerCAmelCase =torch.zeros(a__ , a__ , dtype=torch.long )
_lowerCAmelCase =torch.zeros(a__ , a__ , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(a__ , a__ ) ):
_lowerCAmelCase =input_row['sentence']
_lowerCAmelCase =1
_lowerCAmelCase =torch.stack([row['image'] for row in batch] )
_lowerCAmelCase =torch.stack([row['label'] for row in batch] )
_lowerCAmelCase =torch.stack([row['image_start_token'] for row in batch] )
_lowerCAmelCase =torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def UpperCamelCase__ ( ):
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def UpperCamelCase__ ( ):
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(2_5_6 ),
transforms.CenterCrop(2_2_4 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
| 720
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : int = 'sequence-classification'
def __init__( self , __A ) -> List[Any]:
if type(__A ) == dict:
_lowerCAmelCase =Namespace(**__A )
_lowerCAmelCase =glue_output_modes[hparams.task]
_lowerCAmelCase =glue_tasks_num_labels[hparams.task]
super().__init__(__A , __A , self.mode )
def UpperCamelCase__ ( self , **__A ) -> Any:
return self.model(**__A )
def UpperCamelCase__ ( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase =outputs[0]
_lowerCAmelCase =self.trainer.lr_schedulers[0]['scheduler']
_lowerCAmelCase ={'loss': loss, 'rate': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.hparams
_lowerCAmelCase =processors[args.task]()
_lowerCAmelCase =processor.get_labels()
for mode in ["train", "dev"]:
_lowerCAmelCase =self._feature_file(__A )
if os.path.exists(__A ) and not args.overwrite_cache:
logger.info('Loading features from cached file %s' , __A )
else:
logger.info('Creating features from dataset file at %s' , args.data_dir )
_lowerCAmelCase =(
processor.get_dev_examples(args.data_dir )
if mode == 'dev'
else processor.get_train_examples(args.data_dir )
)
_lowerCAmelCase =convert_examples_to_features(
__A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('Saving features into cached file %s' , __A )
torch.save(__A , __A )
def UpperCamelCase__ ( self , __A , __A , __A = False ) -> DataLoader:
_lowerCAmelCase ='dev' if mode == 'test' else mode
_lowerCAmelCase =self._feature_file(__A )
logger.info('Loading features from cached file %s' , __A )
_lowerCAmelCase =torch.load(__A )
_lowerCAmelCase =torch.tensor([f.input_ids for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
_lowerCAmelCase =torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(__A , __A , __A , __A ) , batch_size=__A , shuffle=__A , )
def UpperCamelCase__ ( self , __A , __A ) -> List[str]:
_lowerCAmelCase ={'input_ids': batch[0], 'attention_mask': batch[1], 'labels': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
_lowerCAmelCase =batch[2] if self.config.model_type in ['bert', 'xlnet', 'albert'] else None
_lowerCAmelCase =self(**__A )
_lowerCAmelCase , _lowerCAmelCase =outputs[:2]
_lowerCAmelCase =logits.detach().cpu().numpy()
_lowerCAmelCase =inputs['labels'].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self , __A ) -> tuple:
_lowerCAmelCase =torch.stack([x['val_loss'] for x in outputs] ).mean().detach().cpu().item()
_lowerCAmelCase =np.concatenate([x['pred'] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
_lowerCAmelCase =np.argmax(__A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
_lowerCAmelCase =np.squeeze(__A )
_lowerCAmelCase =np.concatenate([x['target'] for x in outputs] , axis=0 )
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase =[[] for _ in range(out_label_ids.shape[0] )]
_lowerCAmelCase ={**{'val_loss': val_loss_mean}, **compute_metrics(self.hparams.task , __A , __A )}
_lowerCAmelCase =dict(results.items() )
_lowerCAmelCase =results
return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __A ) -> dict:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =self._eval_end(__A )
_lowerCAmelCase =ret['log']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( __A , __A ) -> Any:
BaseTransformer.add_model_specific_args(__A , __A )
parser.add_argument(
'--max_seq_length' , default=128 , type=__A , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--task' , default='' , type=__A , required=__A , help='The GLUE task to run' , )
parser.add_argument(
'--gpus' , default=0 , type=__A , help='The number of GPUs allocated for this, it is by default 0 meaning none' , )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
return parser
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =argparse.ArgumentParser()
add_generic_args(a__ , os.getcwd() )
_lowerCAmelCase =GLUETransformer.add_model_specific_args(a__ , os.getcwd() )
_lowerCAmelCase =parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
_lowerCAmelCase =os.path.join(
'./results' , F'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
_lowerCAmelCase =GLUETransformer(a__ )
_lowerCAmelCase =generic_train(a__ , a__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
_lowerCAmelCase =sorted(glob.glob(os.path.join(args.output_dir , 'checkpoint-epoch=*.ckpt' ) , recursive=a__ ) )
_lowerCAmelCase =model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(a__ )
if __name__ == "__main__":
main()
| 58
| 0
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
lowercase_ = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
_lowerCAmelCase =k.replace(a__ , a__ )
return k
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =DEFAULTS.copy()
cfg_kwargs.update(a__ )
_lowerCAmelCase =PegasusConfig(**a__ )
_lowerCAmelCase =PegasusForConditionalGeneration(a__ )
_lowerCAmelCase =torch_model.model.state_dict()
_lowerCAmelCase ={}
for k, v in tf_weights.items():
_lowerCAmelCase =rename_state_dict_key(a__ )
if new_k not in sd:
raise ValueError(F'''could not find new key {new_k} in state dict. (converted from {k})''' )
if "dense" in k or "proj" in new_k:
_lowerCAmelCase =v.T
_lowerCAmelCase =torch.tensor(a__ , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F'''{new_k}, {k}, {v.shape}, {sd[new_k].shape}'''
# make sure embedding.padding_idx is respected
_lowerCAmelCase =torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
_lowerCAmelCase =mapping['shared.weight']
_lowerCAmelCase =mapping['shared.weight']
_lowerCAmelCase ={k: torch.zeros_like(a__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
mapping.update(**a__ )
_lowerCAmelCase , _lowerCAmelCase =torch_model.model.load_state_dict(a__ , strict=a__ )
_lowerCAmelCase =[
k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight']
]
assert unexpected_missing == [], F'''no matches found for the following torch keys {unexpected_missing}'''
assert extra == [], F'''no matches found for the following tf keys {extra}'''
return torch_model
def UpperCamelCase__ ( a__="./ckpt/aeslc/model.ckpt-32000" ):
'''simple docstring'''
_lowerCAmelCase =tf.train.list_variables(a__ )
_lowerCAmelCase ={}
_lowerCAmelCase =['Adafactor', 'global_step']
for name, shape in tqdm(a__ , desc='converting tf checkpoint to dict' ):
_lowerCAmelCase =any(pat in name for pat in ignore_name )
if skip_key:
continue
_lowerCAmelCase =tf.train.load_variable(a__ , a__ )
_lowerCAmelCase =array
return tf_weights
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =Path(a__ ).parent.name
_lowerCAmelCase =task_specific_params[F'''summarization_{dataset}''']['max_position_embeddings']
_lowerCAmelCase =PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=a__ )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(a__ )
# convert model
_lowerCAmelCase =get_tf_weights_as_numpy(a__ )
_lowerCAmelCase =task_specific_params[F'''summarization_{dataset}''']
if dataset == "large":
_lowerCAmelCase =task_specific_params
_lowerCAmelCase =convert_pegasus(a__ , a__ )
torch_model.save_pretrained(a__ )
_lowerCAmelCase =torch_model.state_dict()
sd.pop('model.decoder.embed_positions.weight' )
sd.pop('model.encoder.embed_positions.weight' )
torch.save(a__ , Path(a__ ) / 'pytorch_model.bin' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
lowercase_ = parser.parse_args()
if args.save_dir is None:
lowercase_ = Path(args.tf_ckpt_path).parent.name
lowercase_ = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 721
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A ) -> None:
_lowerCAmelCase =num_of_nodes
_lowerCAmelCase =[]
_lowerCAmelCase ={}
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
self.m_edges.append([u_node, v_node, weight] )
def UpperCamelCase__ ( self , __A ) -> int:
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCamelCase__ ( self , __A ) -> None:
if self.m_component[u_node] != u_node:
for k in self.m_component:
_lowerCAmelCase =self.find_component(__A )
def UpperCamelCase__ ( self , __A , __A , __A ) -> None:
if component_size[u_node] <= component_size[v_node]:
_lowerCAmelCase =v_node
component_size[v_node] += component_size[u_node]
self.set_component(__A )
elif component_size[u_node] >= component_size[v_node]:
_lowerCAmelCase =self.find_component(__A )
component_size[u_node] += component_size[v_node]
self.set_component(__A )
def UpperCamelCase__ ( self ) -> None:
_lowerCAmelCase =[]
_lowerCAmelCase =0
_lowerCAmelCase =[-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_lowerCAmelCase =self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_lowerCAmelCase =[u, v, w]
for edge in minimum_weight_edge:
if isinstance(__A , __A ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase =edge
_lowerCAmelCase =self.m_component[u]
_lowerCAmelCase =self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(__A , __A , __A )
print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
_lowerCAmelCase =[-1] * self.m_num_of_nodes
print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def UpperCamelCase__ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
'''simple docstring'''
import datasets
from .evaluate import evaluate
lowercase_ = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
lowercase_ = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
lowercase_ = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE ( datasets.Metric):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {
'id': datasets.Value('string' ),
'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ),
},
'references': {
'id': datasets.Value('string' ),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
},
} ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , )
def UpperCamelCase__ ( self , __A , __A ) -> List[Any]:
_lowerCAmelCase ={prediction['id']: prediction['prediction_text'] for prediction in predictions}
_lowerCAmelCase =[
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
_lowerCAmelCase =evaluate(dataset=__A , predictions=__A )
return score
| 700
|
'''simple docstring'''
from PIL import Image
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
def brightness(a__ ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(a__ )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
| 58
| 0
|
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE ( __lowercase , __lowercase):
"""simple docstring"""
@register_to_config
def __init__( self , __A , __A = None , __A = None ) -> Tuple:
super().__init__()
_lowerCAmelCase =learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
_lowerCAmelCase =torch.zeros(__A , __A )
else:
_lowerCAmelCase =None
_lowerCAmelCase =torch.nn.Parameter(__A )
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : VQModel
lowercase : CLIPTextModel
lowercase : CLIPTokenizer
lowercase : TransformeraDModel
lowercase : LearnedClassifierFreeSamplingEmbeddings
lowercase : VQDiffusionScheduler
def __init__( self , __A , __A , __A , __A , __A , __A , ) -> int:
super().__init__()
self.register_modules(
vqvae=__A , transformer=__A , text_encoder=__A , tokenizer=__A , scheduler=__A , learned_classifier_free_sampling_embeddings=__A , )
def UpperCamelCase__ ( self , __A , __A , __A ) -> Tuple:
_lowerCAmelCase =len(__A ) if isinstance(__A , __A ) else 1
# get prompt text embeddings
_lowerCAmelCase =self.tokenizer(
__A , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
_lowerCAmelCase =text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCAmelCase =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
_lowerCAmelCase =text_input_ids[:, : self.tokenizer.model_max_length]
_lowerCAmelCase =self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_lowerCAmelCase =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__A )
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase =prompt_embeds.repeat_interleave(__A , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_lowerCAmelCase =self.learned_classifier_free_sampling_embeddings.embeddings
_lowerCAmelCase =negative_prompt_embeds.unsqueeze(0 ).repeat(__A , 1 , 1 )
else:
_lowerCAmelCase =[''] * batch_size
_lowerCAmelCase =text_input_ids.shape[-1]
_lowerCAmelCase =self.tokenizer(
__A , padding='max_length' , max_length=__A , truncation=__A , return_tensors='pt' , )
_lowerCAmelCase =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
_lowerCAmelCase =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__A )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase =negative_prompt_embeds.shape[1]
_lowerCAmelCase =negative_prompt_embeds.repeat(1 , __A , 1 )
_lowerCAmelCase =negative_prompt_embeds.view(batch_size * num_images_per_prompt , __A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase =torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , __A , __A = 100 , __A = 5.0 , __A = 1.0 , __A = 1 , __A = None , __A = None , __A = "pil" , __A = True , __A = None , __A = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(__A , __A ):
_lowerCAmelCase =1
elif isinstance(__A , __A ):
_lowerCAmelCase =len(__A )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(__A )}''' )
_lowerCAmelCase =batch_size * num_images_per_prompt
_lowerCAmelCase =guidance_scale > 1.0
_lowerCAmelCase =self._encode_prompt(__A , __A , __A )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(__A )}.''' )
# get the initial completely masked latents unless the user supplied it
_lowerCAmelCase =(batch_size, self.transformer.num_latent_pixels)
if latents is None:
_lowerCAmelCase =self.transformer.num_vector_embeds - 1
_lowerCAmelCase =torch.full(__A , __A ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
_lowerCAmelCase =latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__A , device=self.device )
_lowerCAmelCase =self.scheduler.timesteps.to(self.device )
_lowerCAmelCase =latents
for i, t in enumerate(self.progress_bar(__A ) ):
# expand the sample if we are doing classifier free guidance
_lowerCAmelCase =torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_lowerCAmelCase =self.transformer(__A , encoder_hidden_states=__A , timestep=__A ).sample
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase =model_output.chunk(2 )
_lowerCAmelCase =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__A , dim=1 , keepdim=__A )
_lowerCAmelCase =self.truncate(__A , __A )
# remove `log(0)`'s (`-inf`s)
_lowerCAmelCase =model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase =self.scheduler.step(__A , timestep=__A , sample=__A , generator=__A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__A , __A , __A )
_lowerCAmelCase =self.vqvae.config.vq_embed_dim
_lowerCAmelCase =(batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_lowerCAmelCase =self.vqvae.quantize.get_codebook_entry(__A , shape=__A )
_lowerCAmelCase =self.vqvae.decode(__A , force_not_quantize=__A ).sample
_lowerCAmelCase =(image / 2 + 0.5).clamp(0 , 1 )
_lowerCAmelCase =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCAmelCase =self.numpy_to_pil(__A )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__A )
def UpperCamelCase__ ( self , __A , __A ) -> torch.FloatTensor:
_lowerCAmelCase , _lowerCAmelCase =torch.sort(__A , 1 , descending=__A )
_lowerCAmelCase =torch.exp(__A )
_lowerCAmelCase =sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_lowerCAmelCase =torch.full_like(keep_mask[:, 0:1, :] , __A )
_lowerCAmelCase =torch.cat((all_true, keep_mask) , dim=1 )
_lowerCAmelCase =keep_mask[:, :-1, :]
_lowerCAmelCase =keep_mask.gather(1 , indices.argsort(1 ) )
_lowerCAmelCase =log_p_x_0.clone()
_lowerCAmelCase =-torch.inf # -inf = log(0)
return rv
| 701
|
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase_ = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 128,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
@classmethod
def UpperCamelCase__ ( cls ) -> Optional[Any]:
_lowerCAmelCase =TOKEN
HfFolder.save_token(__A )
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
try:
delete_repo(token=cls._token , repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-config' )
except HTTPError:
pass
def UpperCamelCase__ ( self ) -> str:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('test-config' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A , repo_id='test-config' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained(F'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> Dict:
_lowerCAmelCase =BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__A , repo_id='valid_org/test-config-org' , push_to_hub=__A , use_auth_token=self._token )
_lowerCAmelCase =BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__A , getattr(__A , __A ) )
def UpperCamelCase__ ( self ) -> List[str]:
CustomConfig.register_for_auto_class()
_lowerCAmelCase =CustomConfig(attribute=42 )
config.push_to_hub('test-dynamic-config' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} )
_lowerCAmelCase =AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=__A )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' )
self.assertEqual(new_config.attribute , 42 )
class SCREAMING_SNAKE_CASE ( unittest.TestCase):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> List[Any]:
_lowerCAmelCase =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
_lowerCAmelCase =c.n_embd + 1 # int
_lowerCAmelCase =c.resid_pdrop + 1.0 # float
_lowerCAmelCase =not c.scale_attn_weights # bool
_lowerCAmelCase =c.summary_type + 'foo' # str
c.update_from_string(
F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(__A , c.n_embd , 'mismatch for key: n_embd' )
self.assertEqual(__A , c.resid_pdrop , 'mismatch for key: resid_pdrop' )
self.assertEqual(__A , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' )
self.assertEqual(__A , c.summary_type , 'mismatch for key: summary_type' )
def UpperCamelCase__ ( self ) -> List[str]:
_lowerCAmelCase =PretrainedConfig()
_lowerCAmelCase =[key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
__A , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
_lowerCAmelCase =[key for key, value in config_common_kwargs.items() if value == getattr(__A , __A )]
if len(__A ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
F''' {', '.join(__A )}.''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
with self.assertRaises(__A ):
# config is in subfolder, the following should not work without specifying the subfolder
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' )
self.assertIsNotNone(__A )
def UpperCamelCase__ ( self ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
_lowerCAmelCase =mock.Mock()
_lowerCAmelCase =500
_lowerCAmelCase ={}
_lowerCAmelCase =HTTPError
_lowerCAmelCase ={}
# Download this model to make sure it's in the cache.
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=__A ) as mock_head:
_lowerCAmelCase =BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCamelCase__ ( self ) -> Optional[int]:
# This test is for deprecated behavior and can be removed in v5
_lowerCAmelCase =BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =AutoConfig.from_pretrained('bert-base-cased' )
_lowerCAmelCase =['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(__A )
_lowerCAmelCase =2
json.dump(configuration.to_dict() , open(os.path.join(__A , 'config.4.0.0.json' ) , 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
_lowerCAmelCase =['config.42.0.0.json']
_lowerCAmelCase =768
configuration.save_pretrained(__A )
shutil.move(os.path.join(__A , 'config.4.0.0.json' ) , os.path.join(__A , 'config.42.0.0.json' ) )
_lowerCAmelCase =AutoConfig.from_pretrained(__A )
self.assertEqual(new_configuration.hidden_size , 768 )
def UpperCamelCase__ ( self ) -> Any:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
_lowerCAmelCase ='hf-internal-testing/test-two-configs'
import transformers as new_transformers
_lowerCAmelCase ='v4.0.0'
_lowerCAmelCase , _lowerCAmelCase =new_transformers.models.auto.AutoConfig.from_pretrained(
__A , return_unused_kwargs=__A )
self.assertEqual(new_configuration.hidden_size , 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(__A , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
_lowerCAmelCase ='v3.0.0'
_lowerCAmelCase =old_transformers.models.auto.AutoConfig.from_pretrained(__A )
self.assertEqual(old_configuration.hidden_size , 768 )
| 58
| 0
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
lowercase_ = '''docs/source/en/_toctree.yml'''
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =defaultdict(a__ )
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(a__ )
_lowerCAmelCase =new_doc_list
_lowerCAmelCase =[key for key, value in counts.items() if value > 1]
_lowerCAmelCase =[]
for duplicate_key in duplicates:
_lowerCAmelCase =list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(a__ ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
_lowerCAmelCase =sorted(a__ , key=lambda a__ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(a__ ) > 1:
raise ValueError('{doc_list} has two \'overview\' docs which is not allowed.' )
overview_doc.extend(a__ )
# Sort
return overview_doc
def UpperCamelCase__ ( a__=False ):
'''simple docstring'''
with open(a__ , encoding='utf-8' ) as f:
_lowerCAmelCase =yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase =0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase =content[api_idx]['sections']
# Then to the model doc
_lowerCAmelCase =0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_lowerCAmelCase =api_doc[scheduler_idx]['sections']
_lowerCAmelCase =clean_doc_toc(a__ )
_lowerCAmelCase =False
if new_scheduler_doc != scheduler_doc:
_lowerCAmelCase =True
if overwrite:
_lowerCAmelCase =new_scheduler_doc
if diff:
if overwrite:
_lowerCAmelCase =api_doc
with open(a__ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(a__ , allow_unicode=a__ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def UpperCamelCase__ ( a__=False ):
'''simple docstring'''
with open(a__ , encoding='utf-8' ) as f:
_lowerCAmelCase =yaml.safe_load(f.read() )
# Get to the API doc
_lowerCAmelCase =0
while content[api_idx]["title"] != "API":
api_idx += 1
_lowerCAmelCase =content[api_idx]['sections']
# Then to the model doc
_lowerCAmelCase =0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_lowerCAmelCase =False
_lowerCAmelCase =api_doc[pipeline_idx]['sections']
_lowerCAmelCase =[]
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_lowerCAmelCase =pipeline_doc['section']
_lowerCAmelCase =clean_doc_toc(a__ )
if overwrite:
_lowerCAmelCase =new_sub_pipeline_doc
new_pipeline_docs.append(a__ )
# sort overall pipeline doc
_lowerCAmelCase =clean_doc_toc(a__ )
if new_pipeline_docs != pipeline_docs:
_lowerCAmelCase =True
if overwrite:
_lowerCAmelCase =new_pipeline_docs
if diff:
if overwrite:
_lowerCAmelCase =api_doc
with open(a__ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(a__ , allow_unicode=a__ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowercase_ = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 702
|
'''simple docstring'''
from __future__ import annotations
lowercase_ = 10
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =1
_lowerCAmelCase =max(a__ )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase =[[] for _ in range(a__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase =int((i / placement) % RADIX )
buckets[tmp].append(a__ )
# put each buckets' contents into list_of_ints
_lowerCAmelCase =0
for b in range(a__ ):
for i in buckets[b]:
_lowerCAmelCase =i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def UpperCamelCase__ ( a__ , a__ , a__ , a__=5 ):
'''simple docstring'''
assert masked_input.count('<mask>' ) == 1
_lowerCAmelCase =torch.tensor(tokenizer.encode(a__ , add_special_tokens=a__ ) ).unsqueeze(0 ) # Batch size 1
_lowerCAmelCase =model(a__ )[0] # The last hidden-state is the first element of the output tuple
_lowerCAmelCase =(input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
_lowerCAmelCase =logits[0, masked_index, :]
_lowerCAmelCase =logits.softmax(dim=0 )
_lowerCAmelCase , _lowerCAmelCase =prob.topk(k=a__ , dim=0 )
_lowerCAmelCase =' '.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(a__ ) )] )
_lowerCAmelCase =tokenizer.mask_token
_lowerCAmelCase =[]
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(' ' ) ):
_lowerCAmelCase =predicted_token_bpe.replace('\u2581' , ' ' )
if " {0}".format(a__ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(' {0}'.format(a__ ) , a__ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(a__ , a__ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
lowercase_ = CamembertTokenizer.from_pretrained('''camembert-base''')
lowercase_ = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
lowercase_ = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 703
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 58
| 0
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
lowercase_ = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
lowercase_ = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
lowercase_ = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
return float((preds == labels).mean() )
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =simple_accuracy(a__ , a__ )
_lowerCAmelCase =float(fa_score(y_true=a__ , y_pred=a__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def UpperCamelCase__ ( a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =float(pearsonr(a__ , a__ )[0] )
_lowerCAmelCase =float(spearmanr(a__ , a__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE ( datasets.Metric):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> int:
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
def UpperCamelCase__ ( self , __A , __A ) -> str:
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(__A , __A )}
elif self.config_name == "stsb":
return pearson_and_spearman(__A , __A )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(__A , __A )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(__A , __A )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 704
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =len(a__ ) // 2
# choose the middle 3 elements
_lowerCAmelCase =lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__lowercase)
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
def __init__( self , *__A , **__A ) -> Optional[int]:
super().__init__(*__A , **__A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCamelCase__ ( self , __A=None , __A=None , __A=None ) -> Tuple:
_lowerCAmelCase ={}
_lowerCAmelCase ={}
if prompt is not None:
_lowerCAmelCase =prompt
if generate_kwargs is not None:
_lowerCAmelCase =generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowerCAmelCase ={}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
_lowerCAmelCase =max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , __A , **__A ) -> str:
return super().__call__(__A , **__A )
def UpperCamelCase__ ( self , __A , __A=None ) -> Union[str, Any]:
_lowerCAmelCase =load_image(__A )
if prompt is not None:
if not isinstance(__A , __A ):
raise ValueError(
F'''Received an invalid text input, got - {type(__A )} - but expected a single string. '''
'Note also that one single text can be provided for conditional image to text generation.' )
_lowerCAmelCase =self.model.config.model_type
if model_type == "git":
_lowerCAmelCase =self.image_processor(images=__A , return_tensors=self.framework )
_lowerCAmelCase =self.tokenizer(text=__A , add_special_tokens=__A ).input_ids
_lowerCAmelCase =[self.tokenizer.cls_token_id] + input_ids
_lowerCAmelCase =torch.tensor(__A ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
_lowerCAmelCase =self.image_processor(images=__A , header_text=__A , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowerCAmelCase =self.image_processor(images=__A , return_tensors=self.framework )
_lowerCAmelCase =self.tokenizer(__A , return_tensors=self.framework )
model_inputs.update(__A )
else:
raise ValueError(F'''Model type {model_type} does not support conditional text generation''' )
else:
_lowerCAmelCase =self.image_processor(images=__A , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowerCAmelCase =None
return model_inputs
def UpperCamelCase__ ( self , __A , __A=None ) -> List[Any]:
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , __A )
and all(x is None for x in model_inputs['input_ids'] )
):
_lowerCAmelCase =None
if generate_kwargs is None:
_lowerCAmelCase ={}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowerCAmelCase =model_inputs.pop(self.model.main_input_name )
_lowerCAmelCase =self.model.generate(__A , **__A , **__A )
return model_outputs
def UpperCamelCase__ ( self , __A ) -> List[str]:
_lowerCAmelCase =[]
for output_ids in model_outputs:
_lowerCAmelCase ={
'generated_text': self.tokenizer.decode(
__A , skip_special_tokens=__A , )
}
records.append(__A )
return records
| 705
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
lowercase_ = logging.get_logger(__name__)
lowercase_ = {'''vocab_file''': '''vocab.txt'''}
lowercase_ = {
'''vocab_file''': {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt''',
}
}
lowercase_ = {
'''YituTech/conv-bert-base''': 512,
'''YituTech/conv-bert-medium-small''': 512,
'''YituTech/conv-bert-small''': 512,
}
lowercase_ = {
'''YituTech/conv-bert-base''': {'''do_lower_case''': True},
'''YituTech/conv-bert-medium-small''': {'''do_lower_case''': True},
'''YituTech/conv-bert-small''': {'''do_lower_case''': True},
}
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Union[str, Any] = VOCAB_FILES_NAMES
lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase : List[str] = ConvBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A="[UNK]" , __A="[SEP]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A=True , __A=None , **__A , ) -> Union[str, Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , tokenize_chinese_chars=__A , strip_accents=__A , **__A , )
_lowerCAmelCase =json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __A ) != do_lower_case
or normalizer_state.get('strip_accents' , __A ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __A ) != tokenize_chinese_chars
):
_lowerCAmelCase =getattr(__A , normalizer_state.pop('type' ) )
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =strip_accents
_lowerCAmelCase =tokenize_chinese_chars
_lowerCAmelCase =normalizer_class(**__A )
_lowerCAmelCase =do_lower_case
def UpperCamelCase__ ( self , __A , __A=None ) -> int:
_lowerCAmelCase =[self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self , __A , __A = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self , __A , __A = None ) -> Tuple[str]:
_lowerCAmelCase =self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
| 58
| 0
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , __A , __A ) -> Union[str, Any]:
_lowerCAmelCase =question_encoder
_lowerCAmelCase =generator
_lowerCAmelCase =self.question_encoder
def UpperCamelCase__ ( self , __A ) -> Optional[int]:
if os.path.isfile(__A ):
raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__A , exist_ok=__A )
_lowerCAmelCase =os.path.join(__A , 'question_encoder_tokenizer' )
_lowerCAmelCase =os.path.join(__A , 'generator_tokenizer' )
self.question_encoder.save_pretrained(__A )
self.generator.save_pretrained(__A )
@classmethod
def UpperCamelCase__ ( cls , __A , **__A ) -> Optional[int]:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_lowerCAmelCase =kwargs.pop('config' , __A )
if config is None:
_lowerCAmelCase =RagConfig.from_pretrained(__A )
_lowerCAmelCase =AutoTokenizer.from_pretrained(
__A , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
_lowerCAmelCase =AutoTokenizer.from_pretrained(
__A , config=config.generator , subfolder='generator_tokenizer' )
return cls(question_encoder=__A , generator=__A )
def __call__( self , *__A , **__A ) -> Union[str, Any]:
return self.current_tokenizer(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.generator.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> List[Any]:
return self.generator.decode(*__A , **__A )
def UpperCamelCase__ ( self ) -> Any:
_lowerCAmelCase =self.question_encoder
def UpperCamelCase__ ( self ) -> Optional[int]:
_lowerCAmelCase =self.generator
def UpperCamelCase__ ( self , __A , __A = None , __A = None , __A = None , __A = "longest" , __A = None , __A = True , **__A , ) -> BatchEncoding:
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' , __A , )
if max_length is None:
_lowerCAmelCase =self.current_tokenizer.model_max_length
_lowerCAmelCase =self(
__A , add_special_tokens=__A , return_tensors=__A , max_length=__A , padding=__A , truncation=__A , **__A , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_lowerCAmelCase =self.current_tokenizer.model_max_length
_lowerCAmelCase =self(
text_target=__A , add_special_tokens=__A , return_tensors=__A , padding=__A , max_length=__A , truncation=__A , **__A , )
_lowerCAmelCase =labels['input_ids']
return model_inputs
| 706
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE ( __lowercase):
"""simple docstring"""
lowercase : Any = ['image_processor', 'tokenizer']
lowercase : Any = 'CLIPImageProcessor'
lowercase : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __A=None , __A=None , **__A ) -> str:
_lowerCAmelCase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __A , )
_lowerCAmelCase =kwargs.pop('feature_extractor' )
_lowerCAmelCase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__A , __A )
def __call__( self , __A=None , __A=None , __A=None , **__A ) -> Optional[int]:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowerCAmelCase =self.tokenizer(__A , return_tensors=__A , **__A )
if images is not None:
_lowerCAmelCase =self.image_processor(__A , return_tensors=__A , **__A )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Any:
return self.tokenizer.batch_decode(*__A , **__A )
def UpperCamelCase__ ( self , *__A , **__A ) -> Optional[int]:
return self.tokenizer.decode(*__A , **__A )
@property
def UpperCamelCase__ ( self ) -> Tuple:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCamelCase__ ( self ) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __A , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __A , )
return self.image_processor
| 58
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.