from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
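A minimal usage sketch of the special-token layout the helpers above produce. The ids below are made up for illustration; real ids come from the trained vocabulary, and nothing here is taken from the original file beyond the layout logic itself.

cls_id, sep_id = 0, 2          # illustrative stand-ins for <s> and </s>
token_ids_a = [10, 11, 12]     # hypothetical ids for sentence A
token_ids_b = [20, 21]         # hypothetical ids for sentence B

# build_inputs_with_special_tokens: <s> A </s>  /  <s> A </s> B </s>
single = [cls_id] + token_ids_a + [sep_id]
pair = [cls_id] + token_ids_a + [sep_id] + token_ids_b + [sep_id]

# create_token_type_ids_from_sequences: 0s for the first segment, 1s for the second
token_types = len([cls_id] + token_ids_a + [sep_id]) * [0] + len(token_ids_b + [sep_id]) * [1]
print(single, pair, token_types)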
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Determine, using a Counter, whether the string can be rearranged into a palindrome."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Determine whether the string can be rearranged into a palindrome.

    This holds exactly when at most one character occurs an odd number of times.
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        if odd_char > 1:
            return False
    return True


def benchmark(input_str: str = "") -> None:
    """Benchmark both implementations on the given string."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class MusicgenProcessor(ProcessorMixin):
    """Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
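A self-contained numpy sketch of the padding-mask trick `_decode_audio` relies on; the numbers are invented for illustration. Positions where the mask equals the feature extractor's padding value are stripped from each generated waveform.

import numpy as np

padding_value = 0                                      # assumed padding value
audio = np.arange(10, dtype=float).reshape(1, 1, 10)   # (batch, channels, seq_len)
mask = np.array([[1, 1, 1, 1, 1, 1, 1, 0, 0, 0]])      # last 3 samples are padding

trimmed = np.asarray(audio[0])[mask[0][None, :] != padding_value]
print(trimmed.reshape(1, -1))                          # only the 7 valid samples survive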
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import Dataset, load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_MASKED_LM_MAPPING,
    AutoConfig,
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process


logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def add_chinese_references(dataset, ref_file):
    with open(ref_file, "r", encoding="utf-8") as f:
        refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]
    assert len(dataset) == len(refs)

    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["chinese_ref"] = refs
    return Dataset.from_dict(dataset_dict)


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
        if "validation" not in datasets.keys():
            datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
            )
            datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
            )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split(".")[-1]
        if extension == "txt":
            extension = "text"
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )

    if model_args.model_name_or_path:
        model = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelForMaskedLM.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    else:
        column_names = datasets["validation"].column_names
    text_column_name = "text" if "text" in column_names else column_names[0]

    padding = "max_length" if data_args.pad_to_max_length else False

    def tokenize_function(examples):
        # Remove empty lines
        examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
        return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length)

    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        remove_columns=[text_column_name],
        load_from_cache_file=not data_args.overwrite_cache,
    )

    # Add the chinese references if provided
    if data_args.train_ref_file is not None:
        tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file)
    if data_args.validation_ref_file is not None:
        tokenized_datasets["validation"] = add_chinese_references(
            tokenized_datasets["validation"], data_args.validation_ref_file
        )
    # If we have ref files, need to avoid it removed by trainer
    has_ref = data_args.train_ref_file or data_args.validation_ref_file
    if has_ref:
        training_args.remove_unused_columns = False

    # Data collator
    # This one will take care of randomly masking the tokens.
    data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
        if trainer.is_world_process_zero():
            with open(output_train_file, "w") as writer:
                logger.info("***** Train results *****")
                for key, value in sorted(train_result.metrics.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        results["perplexity"] = perplexity

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in sorted(results.items()):
                    logger.info(f"  {key} = {value}")
                    writer.write(f"{key} = {value}\n")

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
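A minimal sketch of the ref-file format `add_chinese_references` expects: one JSON list per line, aligned one-to-one with the dataset rows. The token ids and ref positions below are invented for illustration only.

import json

from datasets import Dataset

dataset = Dataset.from_dict({"input_ids": [[101, 704, 1744, 102]]})
ref_lines = [json.dumps([2])]   # hypothetical sub-word positions to join for whole word masking

refs = [json.loads(line) for line in ref_lines if len(line) > 0 and not line.isspace()]
assert len(dataset) == len(refs)
dataset = Dataset.from_dict({**{c: dataset[c] for c in dataset.column_names}, "chinese_ref": refs})
print(dataset[0])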
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
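Unlike BERT-style tokenizers, XLNet appends `<sep> <cls>` at the end of the sequence, and the `<cls>` position gets its own segment id (2). A small sketch of the resulting layout with made-up ids:

sep_id, cls_id = 4, 3                 # illustrative ids only
ids_a, ids_b = [10, 11], [20]

pair = ids_a + [sep_id] + ids_b + [sep_id] + [cls_id]              # A <sep> B <sep> <cls>
segments = len(ids_a + [sep_id]) * [0] + len(ids_b + [sep_id]) * [1] + [2]
print(pair, segments)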
import copy
import inspect
import unittest

from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwiftFormerForImageClassification, SwiftFormerModel
    from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
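The shape assertions in `test_hidden_states_output` follow from the tester's defaults: a 224-pixel input yields a 56-pixel first-stage map (224 // 4), the spatial size halves after every two blocks, and the channel count follows `embed_dims`. A quick sketch of the expected (channels, height, width) triples:

image_size, embed_dims = 224, [48, 56, 112, 220]
for i in range(8):
    side = (image_size // 4) // 2 ** (i // 2)
    print(i, (embed_dims[i // 2], side, side))   # (48, 56, 56), (48, 56, 56), (56, 28, 28), ... (220, 7, 7)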
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Copy the embedding weights and their LayerNorm into the student.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    # Copy every other-ish teacher layer into consecutive student layers.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
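The teacher-to-student layer correspondence above keeps six of the twelve teacher layers; a tiny sketch of the resulting index mapping:

teacher_layers = [0, 2, 4, 7, 9, 11]   # teacher layers copied into the 6-layer student
mapping = dict(enumerate(teacher_layers))
print(mapping)                          # {0: 0, 1: 2, 2: 4, 3: 7, 4: 9, 5: 11}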
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset

from transformers import AutoTokenizer, HfArgumentParser


PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get a whitespace-insensitive MD5 hash of the content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the alphanumeric fraction of the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a configuration file or a unit test, by keyword scan and word counts."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if the file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test and keyword-free files are dropped probabilistically."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip, then remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
def gray_code(bit_count: int) -> list:
    """Take in an integer n and return the n-bit gray code sequence as integers."""
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Output the n-bit gray code sequence as a list of bit strings."""
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n=1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
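The recursion mirrors the classic reflect-and-prefix construction: prefix "0" to the (n-1)-bit sequence, then prefix "1" to its reverse, so adjacent entries differ in exactly one bit. For example:

print(gray_code_sequence_string(2))   # ['00', '01', '11', '10']
print(gray_code(2))                   # [0, 1, 3, 2]
print(gray_code(3))                   # [0, 1, 3, 2, 6, 7, 5, 4]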
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "https://openaipublic.azureedge.net/jukebox/models/"
UpperCAmelCase__ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_UpperCAmelCase = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_UpperCAmelCase = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_UpperCAmelCase = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_UpperCAmelCase = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def fix_jukebox_keys ( state_dict , model_state_dict , key_prefix , mapping ) -> dict:
    '''simple docstring'''
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
    re_encoder_block_resnet = re.compile(
        R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_encoder_block_proj_out = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
    re_decoder_block_conv_out = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
    re_decoder_block_resnet = re.compile(
        R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_decoder_block_proj_in = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
    re_prior_cond_conv_out = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
    re_prior_cond_resnet = re.compile(
        R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_prior_cond_proj_in = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if key is None or F"{key_prefix}.{key}" not in model_state_dict:
            print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            val = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
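# Illustrative trace of the conv_in rule above (the key below is hypothetical):
#   "encoders.0.level_blocks.1.model.2.3.weight" matches re_encoder_block_conv_in,
#   block_index = int("2") * 2 + int("3") == 7, so the key is rewritten to
#   "encoders.0.level_blocks.1.downsample_block.7.weight"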
@torch.no_grad()
def A ( _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict=None ) -> Dict:
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_UpperCAmelCase = requests.get(F"{PREFIX}{file}" , allow_redirects=_UpperCAmelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_UpperCAmelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
_UpperCAmelCase = MODEL_MAPPING[model_name.split('/' )[-1]]
_UpperCAmelCase = JukeboxConfig.from_pretrained(_UpperCAmelCase )
_UpperCAmelCase = JukeboxModel(_UpperCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = {}
for i, dict_name in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
_UpperCAmelCase = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_UpperCAmelCase = old_dic[k]
elif k.endswith('.w' ):
_UpperCAmelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_UpperCAmelCase = old_dic[k]
else:
_UpperCAmelCase = old_dic[k]
_UpperCAmelCase = 'vqvae' if i == 0 else F"priors.{3 - i}"
_UpperCAmelCase = fix_jukebox_keys(_UpperCAmelCase , model.state_dict() , _UpperCAmelCase , _UpperCAmelCase )
weight_dict.append(_UpperCAmelCase )
_UpperCAmelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
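# Usage sketch, matching the argument defaults above (the script filename is
# an assumption):
#
#   python convert_jukebox.py --model_name jukebox-5b-lyrics \
#       --pytorch_dump_folder_path jukebox-5b-lyrics-converted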
| 639
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
# TODO: upload to AWS
UpperCAmelCase__ = {
"""yjernite/retribert-base-uncased""": (
"""https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"""
),
}
class __lowerCAmelCase ( __snake_case ):
UpperCamelCase = 'retribert'
def __init__( self : str , A : Any=3_05_22 , A : Any=7_68 , A : str=8 , A : Union[str, Any]=12 , A : int=30_72 , A : Tuple="gelu" , A : Optional[int]=0.1 , A : List[str]=0.1 , A : Any=5_12 , A : str=2 , A : List[str]=0.0_2 , A : Dict=1E-12 , A : str=True , A : Any=1_28 , A : List[str]=0 , **A : Optional[Any] , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(pad_token_id=A_ , **A_)
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = share_encoders
_UpperCAmelCase = projection_dim
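    # Usage sketch (the class above is the obfuscated RetriBertConfig; the
    # keyword names mirror the attribute assignments above and are assumptions):
    #
    #   config = RetriBertConfig(vocab_size=30_522, hidden_size=768, projection_dim=128)
    #   config.share_encoders  # True by default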
| 716
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
@staticmethod
def _lowerCamelCase ( *A : Union[str, Any] , **A : List[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@require_torch
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
        # The scores are so close that floating-point error makes the ordering
        # non-deterministic across Python and torch versions.
self.assertIn(
nested_simplify(A) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@require_tf
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@slow
@require_torch
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
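    # Distilled usage pattern behind the slow tests above (scores are taken
    # from the assertions and are illustrative, not guaranteed):
    #
    #   classifier = pipeline(task='zero-shot-image-classification', model='openai/clip-vit-base-patch32')
    #   classifier(image, candidate_labels=['cat', 'plane', 'remote'])
    #   # -> [{'score': 0.511, 'label': 'remote'}, {'score': 0.485, 'label': 'cat'}, {'score': 0.004, 'label': 'plane'}]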
| 639
| 0
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''),
('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''),
('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''),
('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''),
('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''),
('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''),
('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''),
('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''),
('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''),
(
'''zero-shot-object-detection''',
'''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''',
'''AutoModelForZeroShotObjectDetection''',
),
('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''),
('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''),
('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''),
('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''),
(
'''table-question-answering''',
'''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForTableQuestionAnswering''',
),
('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''),
('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''),
(
'''next-sentence-prediction''',
'''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''',
'''AutoModelForNextSentencePrediction''',
),
(
'''audio-frame-classification''',
'''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForAudioFrameClassification''',
),
('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''),
(
'''document-question-answering''',
'''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForDocumentQuestionAnswering''',
),
(
'''visual-question-answering''',
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''',
'''AutoModelForVisualQuestionAnswering''',
),
    ('''image-to-text''', '''MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''),
(
'''zero-shot-image-classification''',
'''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''',
'''AutoModelForZeroShotImageClassification''',
),
('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''),
('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''),
('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''),
]
def camel_case_split ( identifier ):
    '''simple docstring'''
    matches = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , identifier )
    return [m.group(0 ) for m in matches]
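# Worked example: the regex splits on lower->upper boundaries and before the
# last capital of an acronym, so
#   camel_case_split('TFBertForMaskedLM') -> ['TF', 'Bert', 'For', 'Masked', 'LM']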
def get_frameworks_table ( ):
    '''simple docstring'''
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace('Config' , '' ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's look through all transformers objects (once) and find out whether models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = ''.join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {'model_type': all_models}
    data['pytorch'] = [pt_models[t] for t in all_models]
    data['tensorflow'] = [tf_models[t] for t in all_models]
    data['flax'] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure each model type gets the right preprocessing class.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = 'AutoProcessor'
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = 'AutoTokenizer'
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = 'AutoFeatureExtractor'
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = 'AutoTokenizer'
    data['processor'] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
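# The resulting frame looks roughly like this (rows and values illustrative):
#   model_type   pytorch   tensorflow   flax    processor
#   bert         True      True         True    AutoTokenizer
#   clip         True      True         True    AutoProcessor
#   videomae     True      False        False   AutoFeatureExtractor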
def update_pipeline_and_auto_class_table ( table ):
    '''simple docstring'''
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
        auto_classes = [auto_class, F"TF_{auto_class}", F"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def update_metadata ( token , commit_sha ):
    '''simple docstring'''
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        'huggingface/transformers-metadata' , 'pipeline_tags.json' , repo_type='dataset' , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]['model_class']: (tags_dataset[i]['pipeline_tag'], tags_dataset[i]['auto_class'])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes to avoid nondeterministic ordering, which would create spurious update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            'model_class': model_classes,
            'pipeline_tag': [table[m][0] for m in model_classes],
            'auto_class': [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , 'frameworks.json' ) )
        tags_dataset.to_json(os.path.join(tmp_dir , 'pipeline_tags.json' ) )
        if commit_sha is not None:
            commit_message = (
                F"Update with commit {commit_sha}\n\nSee: "
                F"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = 'Update'
        upload_folder(
            repo_id='huggingface/transformers-metadata' , folder_path=tmp_dir , repo_type='dataset' , token=token , commit_message=commit_message , )
def check_pipeline_tags ( ):
    '''simple docstring'''
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]['pt']
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key )
    if len(missing ) > 0:
        msg = ', '.join(missing )
        raise ValueError(
            'The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside '
            F"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 717
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 0
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __lowerCAmelCase ( unittest.TestCase ):
UpperCamelCase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def _lowerCamelCase ( self : Tuple , A : List[Any] , A : Union[str, Any] , A : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset')
_UpperCAmelCase = VideoClassificationPipeline(model=lowerCamelCase__ , image_processor=lowerCamelCase__ , top_k=2)
_UpperCAmelCase = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : Tuple) -> List[Any]:
"""simple docstring"""
for example in examples:
_UpperCAmelCase = video_classifier(lowerCamelCase__)
self.assertEqual(
lowerCamelCase__ , [
{'score': ANY(lowerCamelCase__), 'label': ANY(lowerCamelCase__)},
{'score': ANY(lowerCamelCase__), 'label': ANY(lowerCamelCase__)},
] , )
@require_torch
def _lowerCamelCase ( self : Any) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
_UpperCAmelCase = VideoMAEFeatureExtractor(
size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10})
_UpperCAmelCase = pipeline(
'video-classification' , model=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , frame_sampling_rate=4)
_UpperCAmelCase = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset')
_UpperCAmelCase = video_classifier(lowerCamelCase__ , top_k=2)
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4) , [{'score': 0.5_1_9_9, 'label': 'LABEL_0'}, {'score': 0.4_8_0_1, 'label': 'LABEL_1'}] , )
_UpperCAmelCase = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4) , [
[{'score': 0.5_1_9_9, 'label': 'LABEL_0'}, {'score': 0.4_8_0_1, 'label': 'LABEL_1'}],
[{'score': 0.5_1_9_9, 'label': 'LABEL_0'}, {'score': 0.4_8_0_1, 'label': 'LABEL_1'}],
] , )
@require_tf
def _lowerCamelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
pass
| 718
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters ( model ) -> int:
    '''simple docstring'''
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
UpperCAmelCase__ = logging.getLogger(__name__)
def get_checkpoint_callback ( output_dir , metric ):
    '''simple docstring'''
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding to this"
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback ( metric , patience ):
    '''simple docstring'''
    return EarlyStopping(
        monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
class __lowerCAmelCase ( pl.Callback ):
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(A)
@rank_zero_only
def _lowerCamelCase ( self : Optional[Any] , A : pl.Trainer , A : pl.LightningModule , A : str , A : int=True) -> None:
"""simple docstring"""
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir)
if type_path == "test":
_UpperCAmelCase = od / 'test_results.txt'
_UpperCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
_UpperCAmelCase = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=A)
generations_file.parent.mkdir(exist_ok=A)
with open(A , 'a+') as writer:
for key in sorted(A):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(A , torch.Tensor):
_UpperCAmelCase = val.item()
_UpperCAmelCase = F"{key}: {val:.6f}\n"
writer.write(A)
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = '\n'.join(metrics['preds'])
generations_file.open('w+').write(A)
@rank_zero_only
def _lowerCamelCase ( self : str , A : Optional[int] , A : List[str]) -> Optional[Any]:
"""simple docstring"""
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(A)
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
def _lowerCamelCase ( self : Dict , A : pl.Trainer , A : pl.LightningModule) -> int:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(A , A , 'test')
@rank_zero_only
def _lowerCamelCase ( self : Tuple , A : pl.Trainer , A : str) -> Dict:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 639
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase__ = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = MgpstrTokenizer
UpperCamelCase = False
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
def _lowerCamelCase ( self : Dict , **A : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'tester'
_UpperCAmelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
_UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=A)
self.assertEqual(len(A) , 1)
_UpperCAmelCase = tokenizer.decode(A , skip_special_tokens=A)
self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A)
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertNotEqual(len(A) , 0)
_UpperCAmelCase = tokenizer.decode(A)
self.assertIsInstance(A , A)
self.assertEqual(text_a.replace(' ' , '') , A)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
| 639
| 0
|
from pathlib import Path
import cv2 as cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation ( img : np.ndarray , pta : np.ndarray , ptb : np.ndarray , rows : int , cols : int ) -> np.ndarray:
    '''simple docstring'''
    matrix = cva.getAffineTransform(pta , ptb )
    return cva.warpAffine(img , matrix , (rows, cols) )
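# What happens above: getAffineTransform solves for the 2x3 matrix M that maps
# the three source points onto the three destination points, and warpAffine
# applies [x', y'] = M @ [x, y, 1] to every pixel of the input image.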
if __name__ == "__main__":
# read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 720
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase__ = {"facebook/bart-base": BartTokenizer}
def parse_args ( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
    args = parser.parse_args()
    return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def export_and_validate_model ( model , tokenizer , onnx_file_path , num_beams , max_length ):
    '''simple docstring'''
    model.eval()
    ort_sess = None
    script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = 'My friends are cool but they eat too many carbs.'
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
        summary_ids = model.generate(
            inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )
        torch.onnx.export(
            script_model , (
                inputs['input_ids'],
                inputs['attention_mask'],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
                'input_ids': {0: 'batch', 1: 'seq'},
                'output_ids': {0: 'batch', 1: 'seq_out'},
            } , example_outputs=summary_ids , )
        logger.info('Model exported to {}'.format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info('Deduplicated and optimized model written to {}'.format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None , {
                'input_ids': inputs['input_ids'].cpu().numpy(),
                'attention_mask': inputs['attention_mask'].cpu().numpy(),
                'num_beams': np.array(num_beams ),
                'max_length': np.array(max_length ),
                'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
            } , )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
        logger.info('Model outputs from torch and ONNX Runtime are similar.' )
        logger.info('Success.' )
def main ( ):
    '''simple docstring'''
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model , tokenizer = load_model_tokenizer(args.model_name_or_path , device )
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = 'BART.onnx'
    logger.info('Exporting model to ONNX' )
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length )
if __name__ == "__main__":
main()
| 639
| 0
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode :
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def A ( tree : TreeNode | None ) -> bool:
    '''simple docstring'''
    def is_valid_tree(node : TreeNode | None ) -> bool:
        if node is None:
            return True
        if not isinstance(node , TreeNode ):
            return False
        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )
    if not is_valid_tree(tree ):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.' )
    def is_binary_search_tree_recursive_check(
        node : TreeNode | None , left_bound : float , right_bound : float ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )
    return is_binary_search_tree_recursive_check(tree , -float('inf' ) , float('inf' ) )
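# Usage sketch for the checker above (kept under its obfuscated name A;
# originally is_binary_search_tree):
#
#   root = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
#   A(root)                          # True: 1.0 < 2.0 < 3.0
#   A(TreeNode(2.0, TreeNode(3.0)))  # False: left child exceeds its parent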
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721
|
def gnome_sort ( lst : list ) -> list:
    '''simple docstring'''
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1] , lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
| 639
| 0
|
def A ( number : int ) -> str:
    '''simple docstring'''
    if number > 0:
        raise ValueError('input must be a negative integer' )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '1'
            + '0' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else '0'
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700
|
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    '''simple docstring'''
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch ( input_ids , pad_token_id , attention_mask=None , ):
    '''simple docstring'''
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
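# Worked example (illustrative): with pad_token_id = 0 and
#   input_ids = [[5, 6, 0, 0],
#                [7, 0, 0, 0]]
# keep_column_mask is [True, True, False, False], so the batch is trimmed to
#   [[5, 6],
#    [7, 0]]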
class __lowerCAmelCase ( A ):
def __init__( self : Union[str, Any] , A : Union[str, Any] , A : Optional[int] , A : str , A : Union[str, Any] , A : int="train" , A : List[Any]=None , A : int=None , A : Tuple=None , A : str="" , ) -> List[Any]:
"""simple docstring"""
super().__init__()
_UpperCAmelCase = Path(A).joinpath(type_path + '.source')
_UpperCAmelCase = Path(A).joinpath(type_path + '.target')
_UpperCAmelCase = self.get_char_lens(self.src_file)
_UpperCAmelCase = max_source_length
_UpperCAmelCase = max_target_length
assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
_UpperCAmelCase = tokenizer
_UpperCAmelCase = prefix
if n_obs is not None:
_UpperCAmelCase = self.src_lens[:n_obs]
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
def __len__( self : Tuple) -> Optional[int]:
"""simple docstring"""
return len(self.src_lens)
def __getitem__( self : Any , A : Dict) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = index + 1 # linecache starts at 1
_UpperCAmelCase = self.prefix + linecache.getline(str(self.src_file) , A).rstrip('\n')
_UpperCAmelCase = linecache.getline(str(self.tgt_file) , A).rstrip('\n')
assert source_line, F"empty source line for index {index}"
assert tgt_line, F"empty tgt line for index {index}"
# Need to add eos token manually for T5
if isinstance(self.tokenizer , A):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_UpperCAmelCase = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , A) else self.tokenizer
)
_UpperCAmelCase = self.tokenizer.generator if isinstance(self.tokenizer , A) else self.tokenizer
_UpperCAmelCase = encode_line(A , A , self.max_source_length , 'right')
_UpperCAmelCase = encode_line(A , A , self.max_target_length , 'right')
_UpperCAmelCase = source_inputs['input_ids'].squeeze()
_UpperCAmelCase = target_inputs['input_ids'].squeeze()
_UpperCAmelCase = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _lowerCamelCase ( A : str) -> Tuple:
"""simple docstring"""
return [len(A) for x in Path(A).open().readlines()]
def _lowerCamelCase ( self : int , A : int) -> Dict[str, torch.Tensor]:
"""simple docstring"""
_UpperCAmelCase = torch.stack([x['input_ids'] for x in batch])
_UpperCAmelCase = torch.stack([x['attention_mask'] for x in batch])
_UpperCAmelCase = torch.stack([x['decoder_input_ids'] for x in batch])
_UpperCAmelCase = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , A)
else self.tokenizer.pad_token_id
)
_UpperCAmelCase = trim_batch(A , A)
_UpperCAmelCase , _UpperCAmelCase = trim_batch(A , A , attention_mask=A)
_UpperCAmelCase = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
UpperCAmelCase__ = getLogger(__name__)
def flatten_list ( summary_ids : List[List] ):
    '''simple docstring'''
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info ( folder_path : str ) -> None:
    '''simple docstring'''
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , 'git_log.json' ) )
def save_json ( content , path , indent=4 , **json_dump_kwargs ):
    '''simple docstring'''
    with open(path , 'w' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json ( path ):
    '''simple docstring'''
    with open(path ) as f:
        return json.load(f )
def get_git_info ( ):
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def lmap ( f : Callable , x : Iterable ) -> List:
    '''simple docstring'''
    return list(map(f , x ) )
def pickle_save ( obj , path ):
    '''simple docstring'''
    with open(path , 'wb' ) as f:
        return pickle.dump(obj , f )
def normalize_answer ( s : str ) -> str:
    '''simple docstring'''
    def remove_articles(text ):
        return re.sub(R'\b(a|an|the)\b' , ' ' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score ( prediction , ground_truth ):
    '''simple docstring'''
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
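# Worked example (illustrative): prediction "cat sat down" vs ground truth
# "the cat sat". normalize_answer drops the article, leaving
# ['cat', 'sat', 'down'] vs ['cat', 'sat']: num_same = 2, precision = 2/3,
# recall = 2/2 = 1.0, and f1 = 2 * (2/3) * 1.0 / (2/3 + 1.0) = 0.8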
def exact_match_score ( prediction , ground_truth ):
    '''simple docstring'''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match ( output_lns : List[str] , reference_lns : List[str] ) -> Dict:
    '''simple docstring'''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model ( model_prefix ):
    '''simple docstring'''
    return model_prefix.startswith('rag' )
def set_extra_model_params ( extra_params , hparams , config ):
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 639
| 0
|
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = False, False, False
@dataclass
class __lowerCAmelCase :
UpperCamelCase = None
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = None
# Automatically constructed
UpperCamelCase = "dict"
UpperCamelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
UpperCamelCase = field(default='''Audio''' , init=A , repr=A )
def __call__( self : Tuple) -> List[Any]:
"""simple docstring"""
return self.pa_type
def _lowerCamelCase ( self : Dict , A : Union[str, bytes, dict]) -> dict:
"""simple docstring"""
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.') from err
if isinstance(A , A):
return {"bytes": None, "path": value}
elif isinstance(A , A):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_UpperCAmelCase = BytesIO()
sf.write(A , value['array'] , value['sampling_rate'] , format='wav')
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path') is not None and os.path.isfile(value['path']):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm'):
# "PCM" only has raw audio bytes
if value.get('sampling_rate') is None:
                    # converting raw "PCM bytes" to "WAV bytes" requires knowing the sampling rate
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object')
if value.get('bytes'):
                # If we already have the PCM bytes, we don't need to read the file again (just use them!)
_UpperCAmelCase = np.frombuffer(value['bytes'] , dtype=np.intaa).astype(np.floataa) / 3_27_67
else:
_UpperCAmelCase = np.memmap(value['path'] , dtype='h' , mode='r').astype(np.floataa) / 3_27_67
_UpperCAmelCase = BytesIO(bytes())
sf.write(A , A , value['sampling_rate'] , format='wav')
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path')}
elif value.get('bytes') is not None or value.get('path') is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes'), "path": value.get('path')}
else:
raise ValueError(
F"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
def _lowerCamelCase ( self : Dict , A : dict , A : Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
"""simple docstring"""
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.')
_UpperCAmelCase , _UpperCAmelCase = (value['path'], BytesIO(value['bytes'])) if value['bytes'] is not None else (value['path'], None)
if path is None and file is None:
raise ValueError(F"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.') from err
_UpperCAmelCase = xsplitext(A)[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
if file is None:
_UpperCAmelCase = token_per_repo_id or {}
_UpperCAmelCase = path.split('::')[-1]
try:
_UpperCAmelCase = string_to_dict(A , config.HUB_DATASETS_URL)['repo_id']
_UpperCAmelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_UpperCAmelCase = None
with xopen(A , 'rb' , use_auth_token=A) as f:
_UpperCAmelCase , _UpperCAmelCase = sf.read(A)
else:
_UpperCAmelCase , _UpperCAmelCase = sf.read(A)
_UpperCAmelCase = array.T
if self.mono:
_UpperCAmelCase = librosa.to_mono(A)
if self.sampling_rate and self.sampling_rate != sampling_rate:
_UpperCAmelCase = librosa.resample(A , orig_sr=A , target_sr=self.sampling_rate)
_UpperCAmelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _lowerCamelCase ( self : int) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""simple docstring"""
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.')
return {
"bytes": Value('binary'),
"path": Value('string'),
}
def _lowerCamelCase ( self : Dict , A : Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
"""simple docstring"""
if pa.types.is_string(storage.type):
_UpperCAmelCase = pa.array([None] * len(A) , type=pa.binary())
_UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null())
elif pa.types.is_binary(storage.type):
_UpperCAmelCase = pa.array([None] * len(A) , type=pa.string())
_UpperCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null())
elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('array'):
_UpperCAmelCase = pa.array([Audio().encode_example(A) if x is not None else None for x in storage.to_pylist()])
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index('bytes') >= 0:
_UpperCAmelCase = storage.field('bytes')
else:
_UpperCAmelCase = pa.array([None] * len(A) , type=pa.binary())
if storage.type.get_field_index('path') >= 0:
_UpperCAmelCase = storage.field('path')
else:
_UpperCAmelCase = pa.array([None] * len(A) , type=pa.string())
_UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null())
return array_cast(A , self.pa_type)
def _lowerCamelCase ( self : Any , A : pa.StructArray) -> pa.StructArray:
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(A : Any):
with xopen(A , 'rb') as f:
_UpperCAmelCase = f.read()
return bytes_
_UpperCAmelCase = pa.array(
[
(path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_UpperCAmelCase = pa.array(
[os.path.basename(A) if path is not None else None for path in storage.field('path').to_pylist()] , type=pa.string() , )
_UpperCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null())
return array_cast(A , self.pa_type)
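# Usage sketch (illustrative, not part of the original module): decoding an encoded
# Audio example returns a dict of the form
# {'path': ..., 'array': <float32 numpy array>, 'sampling_rate': <int>}, e.g.
# Audio(sampling_rate=16_000).decode_example({'path': 'sample.wav', 'bytes': None})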
| 701
|
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> int:
'''simple docstring'''
while second != 0:
_UpperCAmelCase = first & second
first ^= second
_UpperCAmelCase = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter the first number: ").strip())
UpperCAmelCase__ = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 639
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self : List[str] , A : str , A : Dict=7 , A : List[Any]=3 , A : Any=30 , A : Tuple=4_00 , A : Optional[Any]=True , A : Optional[int]=None , A : str=0.9 , A : Optional[Any]=None , A : Optional[Any]=True , A : Optional[int]=[0.5, 0.5, 0.5] , A : Tuple=[0.5, 0.5, 0.5] , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = size if size is not None else {'shortest_edge': 30}
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 30, 'width': 30}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize_and_center_crop
_UpperCAmelCase = size
_UpperCAmelCase = crop_pct
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowerCAmelCase ( __snake_case , unittest.TestCase ):
UpperCamelCase = PoolFormerImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = PoolFormerImageProcessingTester(self)
@property
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__UpperCamelCase , 'do_resize_and_center_crop'))
self.assertTrue(hasattr(__UpperCamelCase , 'size'))
self.assertTrue(hasattr(__UpperCamelCase , 'crop_pct'))
self.assertTrue(hasattr(__UpperCamelCase , 'do_normalize'))
self.assertTrue(hasattr(__UpperCamelCase , 'image_mean'))
self.assertTrue(hasattr(__UpperCamelCase , 'image_std'))
def _lowerCamelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'shortest_edge': 30})
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30})
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {'shortest_edge': 42})
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84})
def _lowerCamelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _lowerCamelCase ( self : Any) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase)
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
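# Note (illustrative): the three tests above feed the same random inputs as PIL images,
# NumPy arrays, and torch tensors; a correct image processor must map all three input
# types to the same (batch, channels, crop_height, crop_width) pixel grid.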
| 702
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def A ( _UpperCAmelCase : str , _UpperCAmelCase : complex , _UpperCAmelCase : str = "x" , _UpperCAmelCase : float = 10**-10 , _UpperCAmelCase : int = 1 , ) -> complex:
'''simple docstring'''
_UpperCAmelCase = symbols(_UpperCAmelCase )
_UpperCAmelCase = lambdify(_UpperCAmelCase , _UpperCAmelCase )
_UpperCAmelCase = lambdify(_UpperCAmelCase , diff(_UpperCAmelCase , _UpperCAmelCase ) )
_UpperCAmelCase = starting_point
while True:
if diff_function(_UpperCAmelCase ) != 0:
_UpperCAmelCase = prev_guess - multiplicity * func(_UpperCAmelCase ) / diff_function(
_UpperCAmelCase )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_UpperCAmelCase = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 639
| 0
|
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = 'Hello world! cécé herlolip'
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = FairseqRobertaModel.from_pretrained(lowerCAmelCase_ )
roberta.eval() # disable dropout
_UpperCAmelCase = roberta.model.encoder.sentence_encoder
_UpperCAmelCase = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
_UpperCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('Our RoBERTa config:' , lowerCAmelCase_ )
_UpperCAmelCase = XLMRobertaXLForSequenceClassification(lowerCAmelCase_ ) if classification_head else XLMRobertaXLForMaskedLM(lowerCAmelCase_ )
model.eval()
# Now let's copy all the weights.
# Embeddings
_UpperCAmelCase = roberta_sent_encoder.embed_tokens.weight
_UpperCAmelCase = roberta_sent_encoder.embed_positions.weight
_UpperCAmelCase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
_UpperCAmelCase = roberta_sent_encoder.layer_norm.weight
_UpperCAmelCase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
_UpperCAmelCase = model.roberta.encoder.layer[i]
_UpperCAmelCase = roberta_sent_encoder.layers[i]
_UpperCAmelCase = layer.attention
_UpperCAmelCase = roberta_layer.self_attn_layer_norm.weight
_UpperCAmelCase = roberta_layer.self_attn_layer_norm.bias
# self attention
_UpperCAmelCase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
_UpperCAmelCase = roberta_layer.self_attn.q_proj.weight
_UpperCAmelCase = roberta_layer.self_attn.q_proj.bias
_UpperCAmelCase = roberta_layer.self_attn.k_proj.weight
_UpperCAmelCase = roberta_layer.self_attn.k_proj.bias
_UpperCAmelCase = roberta_layer.self_attn.v_proj.weight
_UpperCAmelCase = roberta_layer.self_attn.v_proj.bias
# self-attention output
_UpperCAmelCase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
_UpperCAmelCase = roberta_layer.self_attn.out_proj.weight
_UpperCAmelCase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
_UpperCAmelCase = roberta_layer.final_layer_norm.weight
_UpperCAmelCase = roberta_layer.final_layer_norm.bias
# intermediate
_UpperCAmelCase = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
_UpperCAmelCase = roberta_layer.fca.weight
_UpperCAmelCase = roberta_layer.fca.bias
# output
_UpperCAmelCase = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
_UpperCAmelCase = roberta_layer.fca.weight
_UpperCAmelCase = roberta_layer.fca.bias
# end of layer
if classification_head:
_UpperCAmelCase = roberta.model.classification_heads['''mnli'''].dense.weight
_UpperCAmelCase = roberta.model.classification_heads['''mnli'''].dense.bias
_UpperCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.weight
_UpperCAmelCase = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
_UpperCAmelCase = roberta.model.encoder.lm_head.dense.weight
_UpperCAmelCase = roberta.model.encoder.lm_head.dense.bias
_UpperCAmelCase = roberta.model.encoder.lm_head.layer_norm.weight
_UpperCAmelCase = roberta.model.encoder.lm_head.layer_norm.bias
_UpperCAmelCase = roberta.model.encoder.lm_head.weight
_UpperCAmelCase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
_UpperCAmelCase = roberta.encode(lowerCAmelCase_ ).unsqueeze(0 ) # batch of size 1
_UpperCAmelCase = model(lowerCAmelCase_ )[0]
if classification_head:
_UpperCAmelCase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(lowerCAmelCase_ ) )
else:
_UpperCAmelCase = roberta.model(lowerCAmelCase_ )[0]
print(our_output.shape , their_output.shape )
_UpperCAmelCase = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
_UpperCAmelCase = torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(lowerCAmelCase_ ).mkdir(parents=lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
UpperCAmelCase__ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
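# Example invocation (illustrative; the script name and paths are placeholders):
# python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#     --roberta_checkpoint_path ./xlm-roberta-xl-fairseq \
#     --pytorch_dump_folder_path ./xlm-roberta-xl-hf \
#     --classification_head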
| 703
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A) != len(A):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
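# Minimal usage sketch (illustrative; the class name and image file are placeholders,
# since the processor above is defined under an obfuscated name):
# from PIL import Image
# processor = ImageProcessor(size={'shortest_edge': 2_56}, crop_size={'height': 2_24, 'width': 2_24})
# inputs = processor(images=Image.open('example.jpg'), return_tensors='pt')
# inputs['pixel_values'].shape  # -> torch.Size([1, 3, 224, 224]) after resize + center crop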
| 639
| 0
|
from manim import *
class __lowerCAmelCase ( _UpperCAmelCase ):
'''simple docstring'''
def _lowerCamelCase ( self : List[str]) -> int:
"""simple docstring"""
_UpperCAmelCase = Rectangle(height=0.5 , width=0.5)
_UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0)
_UpperCAmelCase = [mem.copy() for i in range(6)]
_UpperCAmelCase = [mem.copy() for i in range(6)]
_UpperCAmelCase = VGroup(*lowercase__).arrange(lowercase__ , buff=0)
_UpperCAmelCase = VGroup(*lowercase__).arrange(lowercase__ , buff=0)
_UpperCAmelCase = VGroup(lowercase__ , lowercase__).arrange(lowercase__ , buff=0)
_UpperCAmelCase = Text('CPU' , font_size=24)
_UpperCAmelCase = Group(lowercase__ , lowercase__).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__)
cpu.move_to([-2.5, -0.5, 0])
self.add(lowercase__)
_UpperCAmelCase = [mem.copy() for i in range(4)]
_UpperCAmelCase = VGroup(*lowercase__).arrange(lowercase__ , buff=0)
_UpperCAmelCase = Text('GPU' , font_size=24)
_UpperCAmelCase = Group(lowercase__ , lowercase__).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__)
gpu.move_to([-1, -1, 0])
self.add(lowercase__)
_UpperCAmelCase = [mem.copy() for i in range(6)]
_UpperCAmelCase = VGroup(*lowercase__).arrange(lowercase__ , buff=0)
_UpperCAmelCase = Text('Model' , font_size=24)
_UpperCAmelCase = Group(lowercase__ , lowercase__).arrange(lowercase__ , buff=0.5 , aligned_edge=lowercase__)
model.move_to([3, -1.0, 0])
self.add(lowercase__)
_UpperCAmelCase = []
for i, rect in enumerate(lowercase__):
rect.set_stroke(lowercase__)
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_UpperCAmelCase = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3).set_stroke(width=0.0).set_fill(lowercase__ , opacity=0.7)
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=lowercase__)
cpu_target.set_x(cpu_target.get_x() + 0.1)
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase__ , buff=0.0)
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase__ , buff=0.0)
self.add(lowercase__)
cpu_targs.append(lowercase__)
_UpperCAmelCase = [mem.copy() for i in range(6)]
_UpperCAmelCase = VGroup(*lowercase__).arrange(lowercase__ , buff=0)
_UpperCAmelCase = Text('Loaded Checkpoint' , font_size=24)
_UpperCAmelCase = Group(lowercase__ , lowercase__).arrange(lowercase__ , aligned_edge=lowercase__ , buff=0.4)
checkpoint.move_to([3, 0.5, 0])
_UpperCAmelCase = Square(side_length=2.2)
key.move_to([-5, 2, 0])
_UpperCAmelCase = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0])
self.add(lowercase__ , lowercase__)
_UpperCAmelCase = MarkupText(
F"<span fgcolor=\'{BLUE}\'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(lowercase__ , DOWN * 2.4 , aligned_edge=key_text.get_left())
_UpperCAmelCase = MarkupText(
F"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
step_a.move_to([2, 2, 0])
self.play(Write(lowercase__) , Write(lowercase__))
self.play(Write(lowercase__ , run_time=1) , Create(lowercase__ , run_time=1))
_UpperCAmelCase = []
_UpperCAmelCase = []
for i, rect in enumerate(lowercase__):
_UpperCAmelCase = fill.copy().set_fill(lowercase__ , opacity=0.7)
target.move_to(lowercase__)
first_animations.append(GrowFromCenter(lowercase__ , run_time=1))
_UpperCAmelCase = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1])
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5])
second_animations.append(MoveToTarget(lowercase__ , run_time=1.5))
self.play(*lowercase__)
self.play(*lowercase__)
self.wait()
| 704
|
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = [0]
_UpperCAmelCase = [0]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
_UpperCAmelCase = [60]
_UpperCAmelCase = [10]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 0)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = 3
_UpperCAmelCase = [1, 2, 3]
_UpperCAmelCase = [3, 2, 1]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 5)
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 50
_UpperCAmelCase = [60, 1_00, 1_20]
_UpperCAmelCase = [10, 20, 30]
_UpperCAmelCase = len(A)
self.assertEqual(k.knapsack(A , A , A , A) , 2_20)
if __name__ == "__main__":
unittest.main()
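# A minimal 0/1 knapsack implementation consistent with the signature exercised above
# (illustrative sketch; the actual module under test may differ):
def knapsack(capacity, weights, values, counter):
    # Base case: no items left or no remaining capacity.
    if counter == 0 or capacity == 0:
        return 0
    # If the item doesn't fit, skip it.
    if weights[counter - 1] > capacity:
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the item.
    return max(
        values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack(capacity, weights, values, counter - 1),
    )
# knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220, matching the last test case.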
| 639
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
UpperCAmelCase__ = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
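# Usage note (illustrative): because of the _LazyModule indirection above, the heavy
# torch-backed symbols are only imported on first attribute access, e.g.:
# from transformers import ClapProcessor, ClapModel  # resolved lazily, torch loads here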
| 705
|
import qiskit
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
_UpperCAmelCase = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_UpperCAmelCase = qiskit.QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
_UpperCAmelCase = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 639
| 0
|
import requests
from bs4 import BeautifulSoup
def A ( _UpperCAmelCase : str = "AAPL" ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
_UpperCAmelCase = BeautifulSoup(requests.get(_UpperCAmelCase ).text , 'html.parser' )
_UpperCAmelCase = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 706
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
# Initialise PyTorch model
_UpperCAmelCase = TaConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
_UpperCAmelCase = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
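# Example invocation (illustrative; the script name and paths are placeholders):
# python convert_t5_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./t5/model.ckpt \
#     --config_file ./t5/config.json \
#     --pytorch_dump_path ./t5-pytorch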
| 639
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModel.from_pretrained(__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModel.from_pretrained(__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> str:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModelForPreTraining.from_pretrained(__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModelForPreTraining.from_pretrained(__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
@slow
def _lowerCamelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained(__a , from_pt=__a)
_UpperCAmelCase = TFAutoModelForCausalLM.from_pretrained(
__a , output_loading_info=__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(__a , from_tf=__a)
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(
__a , output_loading_info=__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
@slow
def _lowerCamelCase ( self : int) -> Any:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModelWithLMHead.from_pretrained(__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModelWithLMHead.from_pretrained(__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
@slow
def _lowerCamelCase ( self : Optional[Any]) -> str:
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModelForMaskedLM.from_pretrained(__a , from_pt=__a)
_UpperCAmelCase = TFAutoModelForMaskedLM.from_pretrained(
__a , output_loading_info=__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(__a , from_tf=__a)
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
__a , output_loading_info=__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
@slow
def _lowerCamelCase ( self : Optional[int]) -> str:
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(__a , from_pt=__a)
_UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(
__a , output_loading_info=__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(__a , from_tf=__a)
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
__a , output_loading_info=__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
@slow
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
_UpperCAmelCase = AutoConfig.from_pretrained(__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = TFAutoModelForQuestionAnswering.from_pretrained(__a , from_pt=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
_UpperCAmelCase = AutoModelForQuestionAnswering.from_pretrained(__a , from_tf=__a)
self.assertIsNotNone(__a)
self.assertIsInstance(__a , __a)
def _lowerCamelCase ( self : Dict) -> str:
"""simple docstring"""
_UpperCAmelCase = TFAutoModelWithLMHead.from_pretrained(__a , from_pt=__a)
self.assertIsInstance(__a , __a)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=__a) , 1_44_10)
_UpperCAmelCase = AutoModelWithLMHead.from_pretrained(__a , from_tf=__a)
self.assertIsInstance(__a , __a)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=__a) , 1_44_10)
def _lowerCamelCase ( self : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = TFAutoModelWithLMHead.from_pretrained(__a , from_pt=__a)
self.assertIsInstance(__a , __a)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=__a) , 1_44_10)
_UpperCAmelCase = AutoModelWithLMHead.from_pretrained(__a , from_tf=__a)
self.assertIsInstance(__a , __a)
self.assertEqual(model.num_parameters() , 1_44_10)
self.assertEqual(model.num_parameters(only_trainable=__a) , 1_44_10)
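# Note (illustrative): the `from_pt=True` / `from_tf=True` flags exercised above test
# cross-framework weight loading; the tiny test model must report the same parameter
# count (14,410, written 1_44_10 above) whether it was loaded from PyTorch or TensorFlow.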
| 707
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
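# Note (illustrative): floats_tensor((1, 3, 32, 32), ...) above builds tiny random
# image/mask tensors so the pipeline test runs quickly; real IF inpainting inputs are
# full-resolution images.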
| 639
| 0
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "▁"
UpperCAmelCase__ = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase__ = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
UpperCAmelCase__ = {
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
UpperCAmelCase__ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __lowerCAmelCase ( UpperCAmelCase_ ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = ["input_ids", "attention_mask"]
UpperCamelCase = []
UpperCamelCase = []
def __init__( self : Tuple , A : str , A : List[str]="<s>" , A : int="</s>" , A : Optional[Any]="</s>" , A : Union[str, Any]="<s>" , A : Any="<unk>" , A : str="<pad>" , A : Optional[Any]="<mask>" , A : Tuple=None , A : Optional[int]=None , A : Optional[Any]=None , A : Optional[Dict[str, Any]] = None , A : List[str]=None , A : Optional[int]=False , **A : List[str] , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case) if isinstance(_snake_case , _snake_case) else mask_token
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_UpperCAmelCase = legacy_behaviour
super().__init__(
bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , tokenizer_file=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_snake_case , **_snake_case , )
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(_snake_case))
_UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
_UpperCAmelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase = 1
_UpperCAmelCase = len(self.sp_model)
_UpperCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_snake_case)
}
_UpperCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
_UpperCAmelCase = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_UpperCAmelCase = list(self.lang_code_to_id.keys())
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens])
_UpperCAmelCase = src_lang if src_lang is not None else 'eng_Latn'
_UpperCAmelCase = self.lang_code_to_id[self._src_lang]
_UpperCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[Any] , A : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def _lowerCamelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _lowerCamelCase ( self : str) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self : Any , A : str) -> Dict:
"""simple docstring"""
_UpperCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def _lowerCamelCase ( self : Optional[Any] , A : List[int] , A : Optional[List[int]] = None , A : bool = False) -> str:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case)
_UpperCAmelCase = [1] * len(self.prefix_tokens)
_UpperCAmelCase = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(_snake_case)) + suffix_ones
return prefix_ones + ([0] * len(_snake_case)) + ([0] * len(_snake_case)) + suffix_ones
def _lowerCamelCase ( self : str , A : List[int] , A : Optional[List[int]] = None) -> Optional[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self : str , A : List[int] , A : Optional[List[int]] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self : List[Any] , A : Any , A : str , A : Optional[str] , A : Optional[str] , **A : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
_UpperCAmelCase = src_lang
_UpperCAmelCase = self(_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , **_snake_case)
_UpperCAmelCase = self.convert_tokens_to_ids(_snake_case)
_UpperCAmelCase = tgt_lang_id
return inputs
def _lowerCamelCase ( self : Optional[int]) -> int:
"""simple docstring"""
_UpperCAmelCase = {self.convert_ids_to_tokens(_snake_case): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _lowerCamelCase ( self : Any , A : str) -> Dict:
"""simple docstring"""
return self.sp_model.encode(_snake_case , out_type=_snake_case)
def _lowerCamelCase ( self : str , A : List[str]) -> int:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase = self.sp_model.PieceToId(_snake_case)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowerCamelCase ( self : Optional[Any] , A : List[Any]) -> int:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def _lowerCamelCase ( self : Optional[int] , A : Dict) -> List[Any]:
"""simple docstring"""
        _UpperCAmelCase = ''.join(_snake_case).replace('▁' , ' ').strip()
return out_string
def _lowerCamelCase ( self : Optional[Any] , A : str , A : Optional[str] = None) -> Dict:
"""simple docstring"""
if not os.path.isdir(_snake_case):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
_snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_snake_case) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _snake_case)
elif not os.path.isfile(self.vocab_file):
with open(_snake_case , 'wb') as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(_snake_case)
return (out_vocab_file,)
def _lowerCamelCase ( self : Tuple , A : List[str] , A : str = "eng_Latn" , A : Optional[List[str]] = None , A : str = "fra_Latn" , **A : Dict , ) -> str:
"""simple docstring"""
_UpperCAmelCase = src_lang
_UpperCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(_snake_case , _snake_case , **_snake_case)
def _lowerCamelCase ( self : int) -> Any:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang)
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def _lowerCamelCase ( self : List[str] , A : Tuple) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCAmelCase = [self.cur_lang_code]
_UpperCAmelCase = [self.eos_token_id]
def _lowerCamelCase ( self : List[Any] , A : str) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.lang_code_to_id[lang]
if self.legacy_behaviour:
_UpperCAmelCase = []
_UpperCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_UpperCAmelCase = [self.cur_lang_code]
_UpperCAmelCase = [self.eos_token_id]
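# Minimal usage sketch (illustrative; upstream this class is NllbTokenizer, and the
# model id below is an example):
# tok = NllbTokenizer.from_pretrained('facebook/nllb-200-distilled-600M',
#                                     src_lang='eng_Latn', tgt_lang='fra_Latn')
# batch = tok('Hello world', return_tensors='pt')
# # with legacy_behaviour=False the ids are: [eng_Latn code] + tokens + [</s>]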
| 708
|
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A ( ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = os.path.dirname(os.path.realpath(_UpperCAmelCase ) )
_UpperCAmelCase = os.path.join(_UpperCAmelCase , 'words.txt' )
_UpperCAmelCase = ''
with open(_UpperCAmelCase ) as f:
_UpperCAmelCase = f.readline()
_UpperCAmelCase = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
_UpperCAmelCase = [
word
        for word in [sum(ord(x) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(_UpperCAmelCase )
if __name__ == "__main__":
print(solution())
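# Worked example (illustrative): 'SKY' -> 19 + 11 + 25 = 55 = 0.5 * 10 * 11, the 10th
# triangular number, so 'SKY' counts as a triangular word.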
| 639
| 0
|
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class __lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
UpperCamelCase = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : Dict , A : str , A : List[Any] , A : List[str]=False) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase)
if return_labels:
if model_class in get_values(_lowerCAmelCase):
                _UpperCAmelCase = tf.zeros(self.model_tester.batch_size , dtype=tf.int32)
return inputs_dict
class __lowerCAmelCase ( _lowerCAmelCase ):
def __init__( self : int , A : Union[str, Any] , A : Tuple=13 , A : Dict=7 , A : Optional[int]=True , A : List[str]=True , A : Dict=True , A : Tuple=True , A : int=99 , A : Optional[int]=32 , A : Tuple=32 , A : Union[str, Any]=2 , A : Optional[int]=4 , A : int=37 , A : List[Any]="gelu" , A : Any=0.1 , A : Union[str, Any]=0.1 , A : Optional[int]=5_12 , A : List[str]=16 , A : Any=2 , A : List[str]=0.0_2 , A : Dict=3 , A : int=4 , A : str=None , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
_UpperCAmelCase = embedding_size
def _lowerCamelCase ( self : Tuple) -> Dict:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCAmelCase = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Optional[int] , A : Optional[Any] , A : int , A : int , A : Any , A : Tuple , A : List[str] , A : Dict) -> Dict:
"""simple docstring"""
_UpperCAmelCase = TFMobileBertModel(config=_lowerCAmelCase)
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCAmelCase = model(_lowerCAmelCase)
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(_lowerCAmelCase)
_UpperCAmelCase = model(_lowerCAmelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def _lowerCamelCase ( self : Dict , A : Optional[int] , A : List[Any] , A : Optional[int] , A : Optional[Any] , A : List[str] , A : Tuple , A : str) -> Dict:
"""simple docstring"""
_UpperCAmelCase = TFMobileBertForMaskedLM(config=_lowerCAmelCase)
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCAmelCase = model(_lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCamelCase ( self : Union[str, Any] , A : str , A : Any , A : str , A : Optional[Any] , A : int , A : List[Any] , A : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = TFMobileBertForNextSentencePrediction(config=_lowerCAmelCase)
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCAmelCase = model(_lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def _lowerCamelCase ( self : List[str] , A : Tuple , A : Optional[Any] , A : List[Any] , A : str , A : Dict , A : int , A : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = TFMobileBertForPreTraining(config=_lowerCAmelCase)
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCAmelCase = model(_lowerCAmelCase)
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def _lowerCamelCase ( self : Optional[Any] , A : List[Any] , A : Optional[Any] , A : List[str] , A : List[Any] , A : Union[str, Any] , A : Union[str, Any] , A : Optional[Any]) -> str:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForSequenceClassification(config=_lowerCAmelCase)
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCAmelCase = model(_lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : List[Any] , A : Tuple , A : Any , A : Optional[Any] , A : Union[str, Any] , A : int , A : Union[str, Any] , A : Dict) -> int:
"""simple docstring"""
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFMobileBertForMultipleChoice(config=_lowerCAmelCase)
_UpperCAmelCase = tf.tile(tf.expand_dims(_lowerCAmelCase , 1) , (1, self.num_choices, 1))
_UpperCAmelCase = tf.tile(tf.expand_dims(_lowerCAmelCase , 1) , (1, self.num_choices, 1))
_UpperCAmelCase = tf.tile(tf.expand_dims(_lowerCAmelCase , 1) , (1, self.num_choices, 1))
_UpperCAmelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(_lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCamelCase ( self : Dict , A : Dict , A : Optional[int] , A : str , A : Tuple , A : Dict , A : Optional[Any] , A : Optional[int]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFMobileBertForTokenClassification(config=_lowerCAmelCase)
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCAmelCase = model(_lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCamelCase ( self : int , A : Union[str, Any] , A : str , A : str , A : List[str] , A : str , A : Any , A : str) -> Any:
"""simple docstring"""
_UpperCAmelCase = TFMobileBertForQuestionAnswering(config=_lowerCAmelCase)
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCAmelCase = model(_lowerCAmelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCamelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
def _lowerCamelCase ( self : List[str]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = TFMobileBertModelTest.TFMobileBertModelTester(self)
_UpperCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37)
def _lowerCamelCase ( self : Any) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Optional[int]) -> int:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_lowerCAmelCase)
def _lowerCamelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCAmelCase)
def _lowerCamelCase ( self : str) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCAmelCase)
def _lowerCamelCase ( self : str) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCAmelCase)
def _lowerCamelCase ( self : int) -> int:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCAmelCase)
def _lowerCamelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCAmelCase)
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCAmelCase)
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCAmelCase)
@slow
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
_UpperCAmelCase = TFMobileBertModel.from_pretrained(_lowerCAmelCase)
self.assertIsNotNone(_lowerCAmelCase)
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCAmelCase = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased')
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]])
_UpperCAmelCase = model(_lowerCAmelCase)[0]
_UpperCAmelCase = [1, 6, 3_05_22]
self.assertEqual(output.shape , _lowerCAmelCase)
_UpperCAmelCase = tf.constant(
[
[
[-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6],
[-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7],
[-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3],
]
])
tf.debugging.assert_near(output[:, :3, :3] , _lowerCAmelCase , atol=1E-4)
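# How these tests would typically be run (the file path is an assumption based on the
# usual transformers test layout, not taken from this module):
#   RUN_SLOW=1 python -m pytest tests/models/mobilebert/test_modeling_tf_mobilebert.py -rA
# Without RUN_SLOW=1 the @slow-decorated checkpoint and integration tests are skipped.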
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be non-negative' )
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_UpperCAmelCase ) , b_binary.zfill(_UpperCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
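# Worked example (mine, not part of the module): 25 is 0b11001 and 32 is 0b100000;
# padding both to six bits and XOR-ing position by position yields 0b111001, i.e. 57.
#   A(25, 32)          -> "0b111001"
#   int(A(25, 32), 2)  == 25 ^ 32 == 57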
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : Any=0.999 , _UpperCAmelCase : Optional[Any]="cosine" , ) -> Any:
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCAmelCase : Any ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCAmelCase : int ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" )
_UpperCAmelCase = []
for i in range(snake_case_ ):
_UpperCAmelCase = i / num_diffusion_timesteps
_UpperCAmelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case_ ) / alpha_bar_fn(snake_case_ ) , snake_case_ ) )
return torch.tensor(snake_case_ , dtype=torch.floataa )
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = [e.name for e in KarrasDiffusionSchedulers]
UpperCamelCase = 2
@register_to_config
def __init__( self : str , A : str = 10_00 , A : Dict = 0.0_0_0_8_5 , A : List[str] = 0.0_1_2 , A : str = "linear" , A : List[str] = None , A : List[Any] = "epsilon" , A : Dict = False , A : Tuple = False , A : Optional[int] = 1.0 , A : Union[str, Any] = "linspace" , A : int = 0 , ) -> int:
"""simple docstring"""
if trained_betas is not None:
_UpperCAmelCase = torch.tensor(_a , dtype=torch.floataa)
elif beta_schedule == "linear":
_UpperCAmelCase = torch.linspace(_a , _a , _a , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCAmelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCAmelCase = betas_for_alpha_bar(_a , alpha_transform_type='cosine')
elif beta_schedule == "exp":
_UpperCAmelCase = betas_for_alpha_bar(_a , alpha_transform_type='exp')
else:
raise NotImplementedError(F"{beta_schedule} does is not implemented for {self.__class__}")
_UpperCAmelCase = 1.0 - self.betas
_UpperCAmelCase = torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(_a , _a , _a)
_UpperCAmelCase = use_karras_sigmas
def _lowerCamelCase ( self : Dict , A : Any , A : Optional[Any]=None) -> str:
"""simple docstring"""
if schedule_timesteps is None:
_UpperCAmelCase = self.timesteps
_UpperCAmelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
_UpperCAmelCase = 1 if len(_a) > 1 else 0
else:
_UpperCAmelCase = timestep.cpu().item() if torch.is_tensor(_a) else timestep
_UpperCAmelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def _lowerCamelCase ( self : Dict) -> Any:
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def _lowerCamelCase ( self : Optional[Any] , A : Dict , A : Any , ) -> torch.FloatTensor:
"""simple docstring"""
_UpperCAmelCase = self.index_for_timestep(_a)
_UpperCAmelCase = self.sigmas[step_index]
_UpperCAmelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def _lowerCamelCase ( self : str , A : List[str] , A : Tuple = None , A : Any = None , ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = num_inference_steps
_UpperCAmelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_UpperCAmelCase = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a)[::-1].copy()
elif self.config.timestep_spacing == "leading":
_UpperCAmelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
_UpperCAmelCase = (np.arange(0 , _a) * step_ratio).round()[::-1].copy().astype(_a)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_UpperCAmelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_steps is a power of 3
_UpperCAmelCase = (np.arange(_a , 0 , -step_ratio)).round().copy().astype(_a)
timesteps -= 1
else:
raise ValueError(
F"{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.")
_UpperCAmelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
_UpperCAmelCase = np.log(_a)
_UpperCAmelCase = np.interp(_a , np.arange(0 , len(_a)) , _a)
if self.config.use_karras_sigmas:
_UpperCAmelCase = self._convert_to_karras(in_sigmas=_a , num_inference_steps=self.num_inference_steps)
_UpperCAmelCase = np.array([self._sigma_to_t(_a , _a) for sigma in sigmas])
_UpperCAmelCase = np.concatenate([sigmas, [0.0]]).astype(np.floataa)
_UpperCAmelCase = torch.from_numpy(_a).to(device=_a)
_UpperCAmelCase = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
_UpperCAmelCase = torch.from_numpy(_a)
_UpperCAmelCase = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
if str(_a).startswith('mps'):
# mps does not support float64
_UpperCAmelCase = timesteps.to(_a , dtype=torch.floataa)
else:
_UpperCAmelCase = timesteps.to(device=_a)
# empty dt and derivative
_UpperCAmelCase = None
_UpperCAmelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_UpperCAmelCase = defaultdict(_a)
def _lowerCamelCase ( self : str , A : int , A : Tuple) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = np.log(_a)
# get distribution
_UpperCAmelCase = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_UpperCAmelCase = np.cumsum((dists >= 0) , axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
_UpperCAmelCase = low_idx + 1
_UpperCAmelCase = log_sigmas[low_idx]
_UpperCAmelCase = log_sigmas[high_idx]
# interpolate sigmas
_UpperCAmelCase = (low - log_sigma) / (low - high)
_UpperCAmelCase = np.clip(_a , 0 , 1)
# transform interpolation to time range
_UpperCAmelCase = (1 - w) * low_idx + w * high_idx
_UpperCAmelCase = t.reshape(sigma.shape)
return t
def _lowerCamelCase ( self : List[str] , A : Optional[Any] , A : List[Any]) -> torch.FloatTensor:
"""simple docstring"""
_UpperCAmelCase = in_sigmas[-1].item()
_UpperCAmelCase = in_sigmas[0].item()
_UpperCAmelCase = 7.0 # 7.0 is the value used in the paper
_UpperCAmelCase = np.linspace(0 , 1 , _a)
_UpperCAmelCase = sigma_min ** (1 / rho)
_UpperCAmelCase = sigma_max ** (1 / rho)
_UpperCAmelCase = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def _lowerCamelCase ( self : str) -> Dict:
"""simple docstring"""
return self.dt is None
def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : str , A : Dict , A : Optional[Any] = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
_UpperCAmelCase = self.index_for_timestep(_a)
# advance index counter by 1
_UpperCAmelCase = timestep.cpu().item() if torch.is_tensor(_a) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_UpperCAmelCase = self.sigmas[step_index]
_UpperCAmelCase = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_UpperCAmelCase = self.sigmas[step_index - 1]
_UpperCAmelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_UpperCAmelCase = 0
_UpperCAmelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_UpperCAmelCase = sigma_hat if self.state_in_first_order else sigma_next
_UpperCAmelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_UpperCAmelCase = sigma_hat if self.state_in_first_order else sigma_next
_UpperCAmelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_UpperCAmelCase = model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")
if self.config.clip_sample:
_UpperCAmelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range)
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_UpperCAmelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_UpperCAmelCase = sigma_next - sigma_hat
# store for 2nd order step
_UpperCAmelCase = derivative
_UpperCAmelCase = dt
_UpperCAmelCase = sample
else:
# 2. 2nd order / Heun's method
_UpperCAmelCase = (sample - pred_original_sample) / sigma_next
_UpperCAmelCase = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_UpperCAmelCase = self.dt
_UpperCAmelCase = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a)
def _lowerCamelCase ( self : Dict , A : Any , A : str , A : Union[str, Any] , ) -> torch.FloatTensor:
"""simple docstring"""
_UpperCAmelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(_a):
# mps does not support float64
_UpperCAmelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa)
_UpperCAmelCase = timesteps.to(original_samples.device , dtype=torch.floataa)
else:
_UpperCAmelCase = self.timesteps.to(original_samples.device)
_UpperCAmelCase = timesteps.to(original_samples.device)
_UpperCAmelCase = [self.index_for_timestep(_a , _a) for t in timesteps]
_UpperCAmelCase = sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
_UpperCAmelCase = sigma.unsqueeze(-1)
_UpperCAmelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self : int) -> List[str]:
"""simple docstring"""
return self.config.num_train_timesteps
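# A minimal usage sketch (every name below is an assumption: the scheduler class name is
# obfuscated above, and `unet`/`latents` stand in for a real denoising model and state):
#   scheduler = HeunDiscreteScheduler(num_train_timesteps=1000, beta_schedule="scaled_linear")
#   scheduler.set_timesteps(num_inference_steps=50, device="cpu")
#   for t in scheduler.timesteps:
#       model_in = scheduler.scale_model_input(latents, t)   # divides by (sigma**2 + 1) ** 0.5
#       noise_pred = unet(model_in, t).sample                # predicted epsilon
#       latents = scheduler.step(noise_pred, t, latents).prev_sample
# Note that set_timesteps above repeats the interior timesteps pairwise, so step()
# alternates between a first-order Euler move and the second-order Heun correction.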
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
UpperCAmelCase__ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
UpperCamelCase = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCamelCase = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
UpperCamelCase = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
UpperCamelCase = '''question'''
UpperCamelCase = '''context'''
UpperCamelCase = '''answers'''
@property
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
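# Illustrative use of the column mapping defined above: it tells the `datasets` task
# machinery how to rename a dataset's columns to the canonical question/context/answers
# schema. For a SQuAD-style dataset the mapping is the identity,
#   {"question": "question", "context": "context", "answers": "answers"},
# while a dataset with differently named columns would override the *_column fields first.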
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={
'''help''': (
                '''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A )} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
if self.train_file is not None:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as f:
_UpperCAmelCase = [json.loads(_UpperCAmelCase ) for line in f.read().splitlines() if (len(_UpperCAmelCase ) > 0 and not line.isspace())]
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = {c: dataset[c] for c in dataset.column_names}
_UpperCAmelCase = refs
return Dataset.from_dict(_UpperCAmelCase )
def A ( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
_UpperCAmelCase = {}
if data_args.train_file is not None:
_UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase = data_args.validation_file
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
_UpperCAmelCase = 'text'
_UpperCAmelCase = load_dataset(_UpperCAmelCase , data_files=_UpperCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
            'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_UpperCAmelCase = AutoModelForMaskedLM.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets['train'].column_names
else:
_UpperCAmelCase = datasets['validation'].column_names
_UpperCAmelCase = 'text' if 'text' in column_names else column_names[0]
_UpperCAmelCase = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_UpperCAmelCase : str ):
# Remove empty lines
_UpperCAmelCase = [line for line in examples['text'] if len(_UpperCAmelCase ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=data_args.max_seq_length )
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, we need to keep the trainer from removing them
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
_UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = math.exp(eval_output['eval_loss'] )
_UpperCAmelCase = perplexity
_UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def A ( _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
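# Example invocation (script name, paths and values are illustrative, not from this file):
#   python run_mlm_wwm.py \
#     --model_name_or_path bert-base-chinese \
#     --train_file data/train.txt \
#     --train_ref_file data/train_ref.txt \
#     --do_train --do_eval \
#     --output_dir out/mlm-wwm
# Each *_ref file holds one JSON list per line marking whole-word boundaries, which
# add_chinese_references() above attaches to the tokenized dataset as an extra column
# (named "chinese_ref" in the upstream script) for DataCollatorForWholeWordMask.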
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__( self : List[str] , *A : Any , **A : Dict) -> None:
"""simple docstring"""
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , _lowerCamelCase , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase)
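# Migration sketch (the checkpoint name is an assumption, not taken from this module):
# the deprecated class is a thin alias, so new code should build the image processor
# directly, e.g.
#   from transformers import FlavaImageProcessor
#   processor = FlavaImageProcessor.from_pretrained("facebook/flava-full")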
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCAmelCase__ = "▁"
# Segments (not really needed)
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = '''left'''
UpperCamelCase = XLNetTokenizer
def __init__( self : Any , A : Union[str, Any]=None , A : str=None , A : Tuple=False , A : Tuple=True , A : Any=False , A : List[str]="<s>" , A : List[str]="</s>" , A : Optional[int]="<unk>" , A : Tuple="<sep>" , A : str="<pad>" , A : Dict="<cls>" , A : Dict="<mask>" , A : Optional[Any]=["<eop>", "<eod>"] , **A : Optional[Any] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else mask_token
super().__init__(
vocab_file=A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , **A , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def _lowerCamelCase ( self : List[str] , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(A):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(A):
copyfile(self.vocab_file , A)
return (out_vocab_file,)
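# Behaviour sketch for the special-token layout above (XLNet appends <sep> and <cls> at
# the END of the sequence, unlike BERT's leading [CLS]):
#   single sequence:  A + [sep] + [cls]             token types: [0] * (len(A) + 1) + [2]
#   sequence pair:    A + [sep] + B + [sep] + [cls]
#                     token types: [0] * (len(A) + 1) + [1] * (len(B) + 1) + [2]
# The <cls> token always carries segment id 2 (the cls_segment_id above).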
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str ) -> str:
'''simple docstring'''
_UpperCAmelCase = len(__snake_case )
_UpperCAmelCase = len(__snake_case )
_UpperCAmelCase = (
first_str_length if first_str_length > second_str_length else second_str_length
)
_UpperCAmelCase = []
for char_count in range(__snake_case ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(__snake_case )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
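# The demo above prints "AXBYZ": characters are interleaved pairwise ("A","X","B","Y")
# and the unmatched tail of the longer string ("Z") is appended at the end.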
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
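# Effect of the lazy structure above (illustrative): importing the package stays cheap,
# and the torch-backed symbols are only materialised on first attribute access, e.g.
#   from transformers import YolosForObjectDetection   # triggers the torch branch lazily
# Environments without torch or vision installed can still import YolosConfig.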
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
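# Worked example (approximate handbook values, mine): for water, density ~998 kg/m^3
# and bulk modulus ~2.15e9 Pa give sqrt(2.15e9 / 998) ~ 1468 m/s, close to the
# measured speed of sound in water (~1480 m/s at room temperature).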
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase__ = re.compile(r"\s+")
def A ( _UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(_UpperCAmelCase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = [len(_UpperCAmelCase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(_UpperCAmelCase ), "line_max": max(_UpperCAmelCase )}
def A ( _UpperCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=5 ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = ['auto-generated', 'autogenerated', 'automatically generated']
_UpperCAmelCase = example['content'].splitlines()
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Optional[int]=0.05 ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['unit tests', 'test file', 'configuration file']
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# first test
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_UpperCAmelCase = example['content'].count('\n' )
_UpperCAmelCase = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A ( _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['def ', 'class ', 'for ', 'while ']
_UpperCAmelCase = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=4 ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A ( _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = tokenizer(example['content'] , truncation=_UpperCAmelCase )['input_ids']
_UpperCAmelCase = len(example['content'] ) / len(_UpperCAmelCase )
return {"ratio": ratio}
def A ( _UpperCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = {}
results.update(get_hash(_UpperCAmelCase ) )
results.update(line_stats(_UpperCAmelCase ) )
results.update(alpha_stats(_UpperCAmelCase ) )
results.update(char_token_ratio(_UpperCAmelCase ) )
results.update(is_autogenerated(_UpperCAmelCase ) )
results.update(is_config_or_test(_UpperCAmelCase ) )
results.update(has_no_keywords(_UpperCAmelCase ) )
results.update(has_few_assignments(_UpperCAmelCase ) )
return results
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
if not check_uniques(_UpperCAmelCase , _UpperCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A ( _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
with open(_UpperCAmelCase , 'rb' ) as f_in:
with gzip.open(str(_UpperCAmelCase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase )
os.unlink(_UpperCAmelCase )
# Settings
UpperCAmelCase__ = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
UpperCAmelCase__ = set(ds.unique("hash"))
UpperCAmelCase__ = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase__ = time.time()
UpperCAmelCase__ , UpperCAmelCase__ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
UpperCAmelCase__ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase__ = output_dir / "data"
data_dir.mkdir(exist_ok=True)
UpperCAmelCase__ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase__ = str(data_dir / f"""file-{file_number+1:012}.json""")
UpperCAmelCase__ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class __lowerCAmelCase ( UpperCamelCase__ ):
UpperCamelCase = 4_2
UpperCamelCase = jnp.floataa
UpperCamelCase = True
def _lowerCamelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
super().setup()
_UpperCAmelCase = nn.Dense(5 , dtype=self.dtype)
def __call__( self : Union[str, Any] , *A : Optional[Any] , **A : Optional[Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = super().__call__(*__A , **__A)
_UpperCAmelCase = self.cls(outputs[2])
return outputs[:2] + (cls_out,)
class __lowerCAmelCase ( UpperCamelCase__ ):
UpperCamelCase = FlaxBigBirdForNaturalQuestionsModule
def _A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
def cross_entropy(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any]=None ):
_UpperCAmelCase = logits.shape[-1]
_UpperCAmelCase = (labels[..., None] == jnp.arange(UpperCamelCase__ )[None]).astype('f4' )
_UpperCAmelCase = jax.nn.log_softmax(UpperCamelCase__ , axis=-1 )
_UpperCAmelCase = -jnp.sum(labels * logits , axis=-1 )
if reduction is not None:
_UpperCAmelCase = reduction(UpperCamelCase__ )
return loss
_UpperCAmelCase = partial(UpperCamelCase__ , reduction=jnp.mean )
_UpperCAmelCase = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = cross_entropy(UpperCamelCase__ , UpperCamelCase__ )
return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class __lowerCAmelCase :
UpperCamelCase = '''google/bigbird-roberta-base'''
UpperCamelCase = 3_0_0_0
UpperCamelCase = 1_0_5_0_0
UpperCamelCase = 1_2_8
UpperCamelCase = 3
UpperCamelCase = 1
UpperCamelCase = 5
# tx_args
UpperCamelCase = 3e-5
UpperCamelCase = 0.0
UpperCamelCase = 2_0_0_0_0
UpperCamelCase = 0.0_095
UpperCamelCase = '''bigbird-roberta-natural-questions'''
UpperCamelCase = '''training-expt'''
UpperCamelCase = '''data/nq-training.jsonl'''
UpperCamelCase = '''data/nq-validation.jsonl'''
def _lowerCamelCase ( self : int) -> List[str]:
"""simple docstring"""
os.makedirs(self.base_dir , exist_ok=__A)
_UpperCAmelCase = os.path.join(self.base_dir , self.save_dir)
_UpperCAmelCase = self.batch_size_per_device * jax.device_count()
@dataclass
class __lowerCAmelCase :
UpperCamelCase = 4_2
UpperCamelCase = 4_0_9_6 # no dynamic padding on TPUs
def __call__( self : str , A : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.collate_fn(__A)
_UpperCAmelCase = jax.tree_util.tree_map(__A , __A)
return batch
def _lowerCamelCase ( self : List[str] , A : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.fetch_inputs(features['input_ids'])
_UpperCAmelCase = {
'input_ids': jnp.array(__A , dtype=jnp.intaa),
'attention_mask': jnp.array(__A , dtype=jnp.intaa),
'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa),
'end_labels': jnp.array(features['end_token'] , dtype=jnp.intaa),
'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa),
}
return batch
def _lowerCamelCase ( self : List[Any] , A : list) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = [self._fetch_inputs(__A) for ids in input_ids]
return zip(*__A)
def _lowerCamelCase ( self : Union[str, Any] , A : list) -> str:
"""simple docstring"""
_UpperCAmelCase = [1 for _ in range(len(__A))]
while len(__A) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask

def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
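
# Minimal usage sketch (assumes a HuggingFace `datasets.Dataset` carrying the
# columns DataCollator expects): grab one padded, device-sharded batch.
def _demo_one_batch(dataset, collator):
    batches = get_batched_dataset(dataset, batch_size=8, seed=0)
    return collator(next(batches))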

@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng

@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
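
# Note on the two pmapped steps above: they expect `state` to be replicated
# across devices (flax.jax_utils.replicate) and each batch to be sharded along
# the leading axis; the `jax.lax.pmean(..., axis_name="batch")` calls then
# average loss and gradients across devices before they are used.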

class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)


# `Callable` from typing and `wandb` are imported at the top of the original script
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")

def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator

def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
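
# Illustrative probe of the schedule above: the returned callable maps a step
# index to a learning rate, ramping init_lr -> lr over `warmup_steps` and then
# decaying linearly towards 1e-7.
def _demo_schedule():
    lr = scheduler_fn(3e-5, 0.0, 100, 1000)
    return [float(lr(step)) for step in (0, 50, 100, 1000)]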

def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # decay everything except biases and LayerNorm scales (keyed by the flattened path)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")
    return key
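
# Spot checks for replace_key (the keys are representative examples of the
# OpenAI checkpoint layout, not an exhaustive list):
def _demo_replace_key():
    assert replace_key("prior.x_out.weight") == "prior.fc_proj_out.weight"
    assert replace_key("prime_state_ln.bias") == "encoder.final_layer_norm.bias"
    assert replace_key("bottleneck.level_blocks.0.k") == "bottleneck.level_blocks.0.codebook"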

def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict

@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
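
# A sketch of the idea behind _LazyModule above (simplified; the real class in
# transformers.utils also handles __dir__, pickling and TYPE_CHECKING):
#
#   import importlib, types
#
#   class _LazyModuleSketch(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#
#       def __getattr__(self, attr):  # triggered only on first access
#           module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#           return getattr(module, attr)
#
# so heavy backends (torch, tf, flax) are imported only when a symbol that
# needs them is actually touched.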
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    # minimal stand-in so the test module still imports when PIL is unavailable
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
@require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            # each of the 5 images yields the same three near-uniform scores
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@slow
@require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
@slow
@require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
def is_unique(input_str: str) -> bool:
    """Return True if every character in ``input_str`` occurs at most once."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
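
# Illustrative behaviour: each character sets one bit of an arbitrarily large
# int, so a repeat is detected in O(1) per character (at the cost of a bitmap
# as wide as the largest code point seen).
def _demo_is_unique():
    assert is_unique("abcde") is True
    assert is_unique("abcda") is False  # 'a' repeats
    assert is_unique("") is True  # vacuously unique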
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)

def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1,
    )
    return checkpoint_callback

def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True,
    )
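
# Typical wiring of the two factories above into a Trainer (a sketch following
# pytorch_lightning's Trainer API; Seq2SeqLoggingCallback is defined below):
def _demo_build_trainer(output_dir):
    callbacks = [
        get_checkpoint_callback(output_dir, metric="rouge2"),
        get_early_stopping_callback(metric="rouge2", patience=3),
        Seq2SeqLoggingCallback(),
    ]
    return pl.Trainer(callbacks=callbacks)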

class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
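
# Usage sketch for the processor above (checkpoint name is the public OWL-ViT
# release; the nested text list exercises the query-padding branch):
def _demo_owlvit_processor():
    from PIL import Image

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.new("RGB", (768, 768))
    texts = [["a photo of a cat", "a photo of a dog"]]
    return processor(text=texts, images=image, return_tensors="pt")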
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}

def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args

def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer

def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
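
# Compatibility note (worth checking against your installed torch): the
# `example_outputs` argument passed to torch.onnx.export above was removed in
# later PyTorch releases, where the export succeeds without it.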

def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
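
# Usage sketch for VisionEncoderDecoderConfig.from_encoder_decoder_configs (a
# ViT encoder paired with a GPT-2 decoder is a common captioning setup):
def _demo_compose_config():
    encoder = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k")
    decoder = AutoConfig.from_pretrained("gpt2")
    return VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)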

class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})

class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}

        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs

class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: "PretrainedConfig") -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(
        self, encoder_config: "PretrainedConfig", decoder_config: "PretrainedConfig", feature: str = "default"
    ) -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst
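
# Gnome sort steps back one position after every swap, giving O(n^2) worst-case
# comparisons and O(n) on already-sorted input. Quick checks:
def _demo_gnome_sort():
    assert gnome_sort([3, 1, 2]) == [1, 2, 3]
    assert gnome_sort([]) == []
    assert gnome_sort([1]) == [1]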
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer

def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by ``pad_token_id``."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction: str, ground_truth: str):
    """Token-level F1 between a prediction and a reference answer."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa


def exact_match_score(prediction: str, ground_truth: str):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix: str) -> bool:
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 639
| 0
|
def pancake_sort(arr: list) -> list:
    """Sort a list using pancake sort (repeated prefix reversals)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole prefix of length cur
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
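
# Illustrative usage (a sketch, not part of the original file):
#
#   >>> pancake_sort([3, 2, 1, 5])
#   [1, 2, 3, 5]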
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 701
|
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations."""
    while second != 0:
        c = first & second  # carry bits
        first ^= second  # sum without the carry
        second = c << 1  # shift the carry into position
    return first
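
# Worked example (illustrative only): add(5, 3) iterates
#   carry = 5 & 3 = 1, first = 5 ^ 3 = 6, second = 1 << 1 = 2
#   carry = 6 & 2 = 2, first = 6 ^ 2 = 4, second = 2 << 1 = 4
#   carry = 4 & 4 = 4, first = 4 ^ 4 = 0, second = 4 << 1 = 8
#   carry = 0 & 8 = 0, first = 0 ^ 8 = 8, second = 0  ->  returns 8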
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter the first number: ").strip())
UpperCAmelCase__ = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 639
| 0
|
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n: int, prec: int = 1_000) -> bool:
    """Miller-Rabin primality test with ``prec`` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd: write n - 1 = d * (2**exp) with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division so d stays an int for bin_exp_mod
        exp += 1
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
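
# Illustrative check (a sketch; the test is probabilistic, so composites are
# rejected with high probability rather than certainty):
#
#   >>> is_prime_big(97)
#   True
#   >>> is_prime_big(100)
#   False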
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = abs(int(input("Enter bound : ").strip()))
print("Here\'s the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 702
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of ``function`` (given as a string) via the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
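
# Illustrative call (a sketch; the expression string is parsed by sympy):
#
#   >>> round(newton_raphson("x**2 - 2", 1.5).real, 6)
#   1.414214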
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 639
| 0
|
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb ``number_of_steps`` stairs taking 1 or 2 steps."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
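
# Illustrative values (a sketch): the recurrence is the Fibonacci sequence,
# so climb_stairs(3) == 3 and climb_stairs(4) == 5.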
if __name__ == "__main__":
import doctest
doctest.testmod()
| 703
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A) != len(A):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 639
| 0
|
def odd_even_transposition(arr: list) -> list:
    """Odd-even (brick) sort: alternate passes over odd- and even-indexed pairs."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
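
# Illustrative usage (a sketch, not part of the original file):
#
#   >>> odd_even_transposition([5, 4, 3, 2, 1])
#   [1, 2, 3, 4, 5]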
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(f"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
| 704
|
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
| 639
| 0
|
from ..utils import DummyObject, requires_backends
# Class name reconstructed from the upstream diffusers dummy-object module; the
# original identifier was obfuscated.
class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 705
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip two qubits with X gates, measure them, and return the counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
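
# Illustrative result (a sketch): with both qubits flipped by X gates, the
# counts concentrate on the |11> state, e.g. {'11': 1000}.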
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 639
| 0
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    """Element-wise logistic sigmoid."""
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    """Numerically stable softmax over the last axis."""
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
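
# Quick numeric sanity check (illustrative only):
#
#   >>> softmax(np.array([[1.0, 2.0, 3.0]])).round(3)
#   array([[0.09 , 0.245, 0.665]])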
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare "no top_k"
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
| 706
|
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 639
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after resizing, mirroring the image processor."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_)
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
_UpperCAmelCase = self.image_processor_tester.get_expected_values(a_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ , batched=a_)
_UpperCAmelCase = image_processing(a_ , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_)
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
_UpperCAmelCase = self.image_processor_tester.get_expected_values(a_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(a_ , return_tensors='pt').pixel_values
_UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ , batched=a_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_)
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
_UpperCAmelCase = self.image_processor_tester.get_expected_values(a_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase = image_processing(a_ , return_tensors='pt').pixel_values
_UpperCAmelCase = self.image_processor_tester.get_expected_values(a_ , batched=a_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f:
_UpperCAmelCase = json.loads(f.read())
_UpperCAmelCase = {"image_id": 3_97_69, "annotations": target}
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor()
_UpperCAmelCase = image_processing(images=a_ , annotations=a_ , return_tensors='pt')
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 8_00, 10_66])
self.assertEqual(encoding['pixel_values'].shape , a_)
_UpperCAmelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a_ , atol=1E-4))
# verify area
_UpperCAmelCase = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a_))
# verify boxes
_UpperCAmelCase = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , a_)
_UpperCAmelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a_ , atol=1E-3))
# verify image_id
_UpperCAmelCase = torch.tensor([3_97_69])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a_))
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a_))
# verify class_labels
_UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a_))
# verify orig_size
_UpperCAmelCase = torch.tensor([4_80, 6_40])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a_))
# verify size
_UpperCAmelCase = torch.tensor([8_00, 10_66])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a_))
@slow
def _lowerCamelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f:
_UpperCAmelCase = json.loads(f.read())
_UpperCAmelCase = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
_UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
# encode them
_UpperCAmelCase = DeformableDetrImageProcessor(format='coco_panoptic')
_UpperCAmelCase = image_processing(images=a_ , annotations=a_ , masks_path=a_ , return_tensors='pt')
# verify pixel values
_UpperCAmelCase = torch.Size([1, 3, 8_00, 10_66])
self.assertEqual(encoding['pixel_values'].shape , a_)
_UpperCAmelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , a_ , atol=1E-4))
# verify area
_UpperCAmelCase = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , a_))
# verify boxes
_UpperCAmelCase = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape , a_)
_UpperCAmelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , a_ , atol=1E-3))
# verify image_id
_UpperCAmelCase = torch.tensor([3_97_69])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , a_))
# verify is_crowd
_UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , a_))
# verify class_labels
_UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , a_))
# verify masks
_UpperCAmelCase = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , a_)
# verify orig_size
_UpperCAmelCase = torch.tensor([4_80, 6_40])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , a_))
# verify size
_UpperCAmelCase = torch.tensor([8_00, 10_66])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , a_))
| 707
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 639
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        # Parameter names reconstructed from the attribute assignments below.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 708
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution() -> int:
    """Count the triangle words in words.txt (Project Euler problem 42)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    wordfile_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(wordfile_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
| 639
| 0
|
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build a one-lane highway; each cell holds a car's speed or -1 if empty."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Number of empty cells in front of the car at ``car_index``."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """One Nagel-Schreckenberg speed update for every car on the highway."""
    number_of_cells = len(highway_now)
    # Before calculations, the next highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Run ``number_of_update`` steps, appending each new highway state."""
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
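
# Illustrative run (a sketch; results vary because drivers brake at random):
#
#   highway = construct_highway(21, 4, 2)
#   states = simulate(highway, number_of_update=5, probability=0.3, max_speed=5)
#   # states[0] is the initial road, states[1:] the five updated time steps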
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
def binary_xor(a: int, b: int) -> str:
    """Return the XOR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
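
# Illustrative usage (a sketch, not part of the original file):
#
#   >>> binary_xor(25, 32)
#   '0b111001'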
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639
| 0
|
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        """A cell can be visited if it is inside the grid, unvisited, and land."""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        """Depth-first search over the 8 neighbours of cell (i, j)."""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make this cell visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
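
# Illustrative usage (a sketch; grid values are 1 for land, 0 for water):
#
#   graph = [[1, 1, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]]
#   Matrix(3, 4, graph).count_islands()  # -> 2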
| 710
|
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
UpperCAmelCase__ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 639
| 0
|
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}
def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert ``value`` between the volume units listed in METRIC_CONVERSION."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
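
# Illustrative conversions (a sketch):
#
#   >>> volume_conversion(4, "cubicmeter", "litre")
#   4000.0
#   >>> round(volume_conversion(1, "litre", "gallon"), 5)
#   0.26417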
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
    )
    validation_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated. Default to the max input length of the model."
            )
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
        if self.validation_file is not None:
            extension = self.validation_file.split(".")[-1]
            assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references(dataset, ref_file):
    '''simple docstring'''
    with open(ref_file , 'r' , encoding='utf-8' ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs ), "each dataset example must have a matching ref line"
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict['chinese_ref'] = refs
    return Dataset.from_dict(dataset_dict )
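# Illustrative note (assumption, not part of the original script): each line of
# the ref file is expected to be a JSON value marking which sub-tokens continue
# a whole Chinese word. It is stored under the 'chinese_ref' column so that
# DataCollatorForWholeWordMask can mask whole words together. A hypothetical
# record after adding the refs might look like:
#
#   {"text": "...", "chinese_ref": [2, 5]}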
def A ( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
_UpperCAmelCase = {}
if data_args.train_file is not None:
_UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase = data_args.validation_file
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
_UpperCAmelCase = 'text'
_UpperCAmelCase = load_dataset(_UpperCAmelCase , data_files=_UpperCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_UpperCAmelCase = AutoModelForMaskedLM.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets['train'].column_names
else:
_UpperCAmelCase = datasets['validation'].column_names
_UpperCAmelCase = 'text' if 'text' in column_names else column_names[0]
_UpperCAmelCase = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_UpperCAmelCase : str ):
# Remove empty lines
_UpperCAmelCase = [line for line in examples['text'] if len(_UpperCAmelCase ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=data_args.max_seq_length )
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
    # If we have ref files, we need to prevent the Trainer from removing those columns
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
_UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = math.exp(eval_output['eval_loss'] )
_UpperCAmelCase = perplexity
_UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def A ( _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
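# Example invocation (a sketch; the flag names come from the dataclasses above
# and from transformers.TrainingArguments -- adjust paths as needed):
#
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt \
#       --train_ref_file train_ref.txt \
#       --do_train --do_eval \
#       --output_dir ./mlm-wwm-out
#
# Passing a single .json file as the only argument would instead take the
# parse_json_file branch of main().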
| 639
| 0
|
from math import sqrt
def A ( _UpperCAmelCase : Optional[int] ) -> bool:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' must been an int and positive"
_UpperCAmelCase = True
# 0 and 1 are none primes.
if number <= 1:
_UpperCAmelCase = False
for divisor in range(2 , int(round(sqrt(__snake_case ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
_UpperCAmelCase = False
break
# precondition
assert isinstance(__snake_case , __snake_case ), "'status' must been from type bool"
return status
def A ( _UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
_UpperCAmelCase = list(range(2 , n + 1 ) )
    _UpperCAmelCase = [] # this list will be returned.
# actual sieve of erathostenes
for i in range(len(__snake_case ) ):
for j in range(i + 1 , len(__snake_case ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
_UpperCAmelCase = 0
# filters actual prime numbers.
_UpperCAmelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
def A ( _UpperCAmelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (n > 2), "'N' must been an int and > 2"
_UpperCAmelCase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(__snake_case ):
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
def A ( _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and number >= 0, "'number' must been an int and >= 0"
    _UpperCAmelCase = [] # this list will be returned by the function.
# potential prime number factors.
_UpperCAmelCase = 2
_UpperCAmelCase = number
if number == 0 or number == 1:
ans.append(__snake_case )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(__snake_case ):
while quotient != 1:
if is_prime(__snake_case ) and (quotient % factor == 0):
ans.append(__snake_case )
quotient /= factor
else:
factor += 1
else:
ans.append(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type list"
return ans
def A ( _UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
_UpperCAmelCase = 0
# prime factorization of 'number'
_UpperCAmelCase = prime_factorization(__snake_case )
_UpperCAmelCase = max(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type int"
return ans
def A ( _UpperCAmelCase : str ) -> int:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' bust been an int and >= 0"
_UpperCAmelCase = 0
# prime factorization of 'number'
_UpperCAmelCase = prime_factorization(__snake_case )
_UpperCAmelCase = min(__snake_case )
# precondition
assert isinstance(__snake_case , __snake_case ), "'ans' must been from type int"
return ans
def A ( _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , bool ), "compare must been from type bool"
return number % 2 == 0
def A ( _UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , bool ), "compare must been from type bool"
return number % 2 != 0
def A ( _UpperCAmelCase : int ) -> str:
'''simple docstring'''
assert (
isinstance(__snake_case , __snake_case ) and (number > 2) and is_even(__snake_case )
), "'number' must been an int, even and > 2"
    _UpperCAmelCase = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
_UpperCAmelCase = get_prime_numbers(__snake_case )
_UpperCAmelCase = len(__snake_case )
# run variable for while-loops.
_UpperCAmelCase = 0
_UpperCAmelCase = None
    # exit variable, for breaking out of the loops
_UpperCAmelCase = True
while i < len_pn and loop:
_UpperCAmelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
_UpperCAmelCase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(__snake_case , __snake_case )
and (len(__snake_case ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
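# Illustrative usage of the Goldbach helper above (referred to here by a
# hypothetical name, since the wrapper is obfuscated as `def A`):
#
#   goldbach(28)  # -> [5, 23], the first prime pair found with 5 + 23 == 28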
def gcd(numbera: int, numberb: int) -> int:
    '''simple docstring'''
    assert (
        isinstance(numbera , int)
        and isinstance(numberb , int)
        and (numbera >= 0)
        and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    # euclidean algorithm: replace (a, b) by (b, a mod b) until b is 0
    while numberb != 0:
        rest = numbera % numberb
        numbera = numberb
        numberb = rest
    # precondition
    assert isinstance(numbera , int) and (
        numbera >= 0
    ), "'number' must been from type int and positive"
    return numbera
def kg_v(numbera: int, numberb: int) -> int:
    '''simple docstring'''
    assert (
        isinstance(numbera , int)
        and isinstance(numberb , int)
        and (numbera >= 1)
        and (numberb >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if numbera > 1 and numberb > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(numbera)
        prime_fac_b = prime_factorization(numberb)
    elif numbera == 1 or numberb == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(numbera , numberb)
    count_a = 0
    count_b = 0
    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a , count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans , int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans
def A ( _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (n >= 0), "'number' must been a positive int"
_UpperCAmelCase = 0
_UpperCAmelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__snake_case ):
ans += 1
# precondition
assert isinstance(__snake_case , __snake_case ) and is_prime(
__snake_case ), "'ans' must been a prime number and from type int"
return ans
def A ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
assert (
is_prime(__snake_case ) and is_prime(__snake_case ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
_UpperCAmelCase = p_number_a + 1 # jump to the next number
    _UpperCAmelCase = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
while number < p_number_a:
ans.append(__snake_case )
number += 1
# fetch the next prime number.
while not is_prime(__snake_case ):
number += 1
# precondition
assert (
isinstance(__snake_case , __snake_case )
and ans[0] != p_number_a
and ans[len(__snake_case ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
    # 'ans' does not contain 'pNumber1' or 'pNumber2'!
return ans
def A ( _UpperCAmelCase : int ) -> List[Any]:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (n >= 1), "'n' must been int and >= 1"
_UpperCAmelCase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(__snake_case )
# precondition
assert ans[0] == 1 and ans[len(__snake_case ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def A ( _UpperCAmelCase : Any ) -> str:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (
number > 1
), "'number' must been an int and >= 1"
_UpperCAmelCase = get_divisors(__snake_case )
# precondition
assert (
isinstance(__snake_case , __snake_case )
and (divisors[0] == 1)
and (divisors[len(__snake_case ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
assert (
isinstance(__snake_case , __snake_case )
and isinstance(__snake_case , __snake_case )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
_UpperCAmelCase = gcd(abs(__snake_case ) , abs(__snake_case ) )
# precondition
assert (
isinstance(__snake_case , __snake_case )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def A ( _UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (n >= 0), "'n' must been a int and >= 0"
    _UpperCAmelCase = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def A ( _UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (n >= 0), "'n' must been an int and >= 0"
_UpperCAmelCase = 0
_UpperCAmelCase = 1
    _UpperCAmelCase = 1 # this will be returned
for _ in range(n - 1 ):
_UpperCAmelCase = ans
ans += fiba
_UpperCAmelCase = tmp
return ans
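# Usage sketch for the number-theory helpers above (illustrative; assumes the
# obfuscated `def A` wrappers are restored to the names their call sites use):
#
#   is_prime(97)              # -> True
#   prime_factorization(60)   # -> [2, 2, 3, 5]
#   gcd(12 , 18)              # -> 6
#   kg_v(12 , 18)             # -> 36  (least common multiple)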
| 712
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCAmelCase__ = "▁"
# Segments (not really needed)
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = '''left'''
UpperCamelCase = XLNetTokenizer
def __init__( self : Any , A : Union[str, Any]=None , A : str=None , A : Tuple=False , A : Tuple=True , A : Any=False , A : List[str]="<s>" , A : List[str]="</s>" , A : Optional[int]="<unk>" , A : Tuple="<sep>" , A : str="<pad>" , A : Dict="<cls>" , A : Dict="<mask>" , A : Optional[Any]=["<eop>", "<eod>"] , **A : Optional[Any] , ) -> str:
"""simple docstring"""
_UpperCAmelCase = AddedToken(A , lstrip=A , rstrip=A) if isinstance(A , A) else mask_token
super().__init__(
vocab_file=A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , additional_special_tokens=A , **A , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def _lowerCamelCase ( self : List[str] , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(A):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(A):
copyfile(self.vocab_file , A)
return (out_vocab_file,)
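# Layout note (illustrative): unlike BERT-style tokenizers, the methods above
# append the special tokens at the END of the sequence:
#
#   single sequence:   A </s> <cls>
#   pair of sequences: A </s> B </s> <cls>
#
# and create_token_type_ids gives the trailing <cls> its own segment id (2).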
| 639
| 0
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
def __init__( self : Optional[int] , A : str , A : Optional[Any]=13 , A : Tuple=7 , A : Dict=True , A : Any=True , A : int=False , A : Optional[Any]=True , A : Optional[int]=99 , A : str=32 , A : Tuple=5 , A : Dict=4 , A : List[str]=37 , A : List[str]="gelu" , A : Any=0.1 , A : List[Any]=0.1 , A : Tuple=5_12 , A : Dict=16 , A : List[str]=2 , A : Optional[Any]=0.0_2 , A : str=3 , A : Tuple=4 , A : int=None , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : List[str]) -> Dict:
"""simple docstring"""
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : str , A : Optional[Any] , A : int , A : Any , A : List[Any] , A : Tuple , A : List[str] , A : Dict) -> str:
"""simple docstring"""
_UpperCAmelCase = BioGptModel(config=lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
_UpperCAmelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)
_UpperCAmelCase = model(lowerCamelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : Union[str, Any] , A : Optional[Any] , A : List[str] , A : Union[str, Any] , A : int , A : Optional[int] , A : Dict , A : Tuple , A : Union[str, Any] , A : Optional[int] , ) -> int:
"""simple docstring"""
_UpperCAmelCase = BioGptForCausalLM(config=lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
_UpperCAmelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCamelCase ( self : Dict , A : str , A : Optional[int] , A : Tuple , A : Dict , A : List[Any] , *A : Union[str, Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = BioGptModel(config=lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
# create attention mask
_UpperCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase_)
_UpperCAmelCase = self.seq_length // 2
_UpperCAmelCase = 0
# first forward pass
_UpperCAmelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_).to_tuple()
        # create hypothetical next token and extend to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size)
# change a random masked slice from input_ids
_UpperCAmelCase = ids_tensor((1,) , lowerCamelCase_).item() + 1
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
_UpperCAmelCase = random_other_next_tokens
# append to next input_ids and attn_mask
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1)
_UpperCAmelCase = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCamelCase_)] , dim=1 , )
# get two different outputs
_UpperCAmelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)["""last_hidden_state"""]
_UpperCAmelCase = model(lowerCamelCase_ , past_key_values=lowerCamelCase_ , attention_mask=lowerCamelCase_)["""last_hidden_state"""]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1]).item()
_UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3))
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : Optional[Any] , A : Any , A : int , A : Tuple , *A : int) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = BioGptModel(config=lowerCamelCase_).to(lowerCamelCase_).eval()
_UpperCAmelCase = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCamelCase_)
# first forward pass
_UpperCAmelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , use_cache=lowerCamelCase_)
_UpperCAmelCase = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size)
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , 2)
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1)
_UpperCAmelCase = torch.cat([attention_mask, next_attn_mask] , dim=-1)
_UpperCAmelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)["""last_hidden_state"""]
_UpperCAmelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_)[
"""last_hidden_state"""
]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1]).item()
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3))
def _lowerCamelCase ( self : Optional[int] , A : int , A : Union[str, Any] , A : Union[str, Any] , A : Union[str, Any] , A : Union[str, Any] , *A : Tuple , A : Optional[int]=False) -> Dict:
"""simple docstring"""
_UpperCAmelCase = BioGptForCausalLM(lowerCamelCase_)
model.to(lowerCamelCase_)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
_UpperCAmelCase = model(lowerCamelCase_ , labels=lowerCamelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
result.loss.backward()
def _lowerCamelCase ( self : Optional[int] , A : str , *A : int) -> Any:
"""simple docstring"""
_UpperCAmelCase = BioGptModel(lowerCamelCase_)
_UpperCAmelCase = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.0_0_1)
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.0_1)
def _lowerCamelCase ( self : str , A : Union[str, Any] , A : Any , A : str , A : str , A : Any , *A : Any) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = BioGptForTokenClassification(lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
_UpperCAmelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
UpperCamelCase = (BioGptForCausalLM,) if is_torch_available() else ()
UpperCamelCase = (
{
'''feature-extraction''': BioGptModel,
'''text-classification''': BioGptForSequenceClassification,
'''text-generation''': BioGptForCausalLM,
'''token-classification''': BioGptForTokenClassification,
'''zero-shot''': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = BioGptModelTester(self)
_UpperCAmelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37)
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_)
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*lowerCamelCase_)
def _lowerCamelCase ( self : List[str]) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCamelCase_)
def _lowerCamelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowerCamelCase_ , gradient_checkpointing=lowerCamelCase_)
def _lowerCamelCase ( self : Tuple) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCamelCase_)
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCamelCase_)
def _lowerCamelCase ( self : Dict) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCamelCase_)
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
model.to(lowerCamelCase_)
_UpperCAmelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt')
_UpperCAmelCase = """left"""
# Define PAD Token = EOS Token = 50256
_UpperCAmelCase = tokenizer.eos_token
_UpperCAmelCase = model.config.eos_token_id
# use different length sentences to test batching
_UpperCAmelCase = [
"""Hello, my dog is a little""",
"""Today, I""",
]
_UpperCAmelCase = tokenizer(lowerCamelCase_ , return_tensors='pt' , padding=lowerCamelCase_)
_UpperCAmelCase = inputs["""input_ids"""].to(lowerCamelCase_)
_UpperCAmelCase = model.generate(
input_ids=lowerCamelCase_ , attention_mask=inputs['attention_mask'].to(lowerCamelCase_) , )
_UpperCAmelCase = tokenizer(sentences[0] , return_tensors='pt').input_ids.to(lowerCamelCase_)
_UpperCAmelCase = model.generate(input_ids=lowerCamelCase_)
_UpperCAmelCase = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
_UpperCAmelCase = tokenizer(sentences[1] , return_tensors='pt').input_ids.to(lowerCamelCase_)
_UpperCAmelCase = model.generate(input_ids=lowerCamelCase_ , max_length=model.config.max_length - num_paddings)
_UpperCAmelCase = tokenizer.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_)
_UpperCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase_)
_UpperCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase_)
_UpperCAmelCase = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_)
self.assertListEqual(lowerCamelCase_ , [non_padded_sentence, padded_sentence])
@slow
def _lowerCamelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = BioGptModel.from_pretrained(lowerCamelCase_)
self.assertIsNotNone(lowerCamelCase_)
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = input_dict["""input_ids"""]
_UpperCAmelCase = input_ids.ne(1).to(lowerCamelCase_)
_UpperCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
_UpperCAmelCase = BioGptForSequenceClassification(lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
_UpperCAmelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def _lowerCamelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = """multi_label_classification"""
_UpperCAmelCase = input_dict["""input_ids"""]
_UpperCAmelCase = input_ids.ne(1).to(lowerCamelCase_)
_UpperCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
_UpperCAmelCase = BioGptForSequenceClassification(lowerCamelCase_)
model.to(lowerCamelCase_)
model.eval()
_UpperCAmelCase = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
_UpperCAmelCase = torch.tensor([[2, 48_05, 9, 6_56, 21]])
_UpperCAmelCase = model(lowerCamelCase_)[0]
_UpperCAmelCase = 4_23_84
_UpperCAmelCase = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , lowerCamelCase_)
_UpperCAmelCase = torch.tensor(
[[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4))
@slow
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = BioGptTokenizer.from_pretrained('microsoft/biogpt')
_UpperCAmelCase = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
model.to(lowerCamelCase_)
torch.manual_seed(0)
_UpperCAmelCase = tokenizer('COVID-19 is' , return_tensors='pt').to(lowerCamelCase_)
_UpperCAmelCase = model.generate(
**lowerCamelCase_ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=lowerCamelCase_ , )
_UpperCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCamelCase_)
_UpperCAmelCase = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(lowerCamelCase_ , lowerCamelCase_)
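# Note (illustrative): the batched-generation test above pads on the LEFT and
# reuses EOS as PAD. For a decoder-only model this keeps the real tokens
# right-aligned, so generation continues from the last real token rather than
# from padding, e.g. with hypothetical ids:
#
#   right pad: [15, 27, 42, PAD, PAD]  -> continuation starts after PAD (wrong)
#   left  pad: [PAD, PAD, 15, 27, 42]  -> continuation starts after 42 (correct)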
| 713
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
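# Minimal sketch of the lazy-import idea behind _LazyModule (illustrative, not
# the real implementation): a module-level __getattr__ (PEP 562) defers the
# heavy submodule import until an attribute is first accessed.
#
#   import importlib
#
#   def __getattr__(name):  # hypothetical flat lookup table: name -> submodule
#       if name in _attr_to_submodule:
#           module = importlib.import_module("." + _attr_to_submodule[name], __name__)
#           return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")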
| 639
| 0
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
UpperCAmelCase__ = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
UpperCAmelCase__ = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
raise Exception("requires fairseq >= 0.9.0")
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = " Hello world! cécé herlolip"
UpperCAmelCase__ = [
("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def A ( _UpperCAmelCase : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = dct.pop(A__ )
_UpperCAmelCase = val
def A ( _UpperCAmelCase : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = torch.load(A__ , map_location='cpu' )
_UpperCAmelCase = torch.hub.load('pytorch/fairseq' , 'bart.large.cnn' ).eval()
hub_interface.model.load_state_dict(sd['model'] )
return hub_interface
def A ( _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = emb.weight.shape
_UpperCAmelCase = nn.Linear(A__ , A__ , bias=A__ )
_UpperCAmelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict=None ) -> str:
'''simple docstring'''
if not os.path.exists(A__ ):
_UpperCAmelCase = torch.hub.load('pytorch/fairseq' , A__ ).eval()
else:
_UpperCAmelCase = load_xsum_checkpoint(A__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
_UpperCAmelCase = checkpoint_path.replace('.' , '-' )
_UpperCAmelCase = BartConfig.from_pretrained(A__ )
_UpperCAmelCase = bart.encode(A__ ).unsqueeze(0 )
_UpperCAmelCase = BartTokenizer.from_pretrained(A__ ).encode(A__ , return_tensors='pt' ).unsqueeze(0 )
if not torch.eq(A__ , A__ ).all():
raise ValueError(
F"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}" )
if checkpoint_path == "bart.large.mnli":
_UpperCAmelCase = bart.state_dict()
remove_ignore_keys_(A__ )
_UpperCAmelCase = state_dict['model.decoder.embed_tokens.weight']
for src, dest in mnli_rename_keys:
rename_key(A__ , A__ , A__ )
_UpperCAmelCase = BartForSequenceClassification(A__ ).eval()
model.load_state_dict(A__ )
_UpperCAmelCase = bart.predict('mnli' , A__ , return_logits=A__ )
_UpperCAmelCase = model(A__ )[0] # logits
else: # no classification heads to worry about
_UpperCAmelCase = bart.model.state_dict()
remove_ignore_keys_(A__ )
_UpperCAmelCase = state_dict['decoder.embed_tokens.weight']
_UpperCAmelCase = bart.extract_features(A__ )
if hf_checkpoint_name == "facebook/bart-large":
_UpperCAmelCase = BartModel(A__ ).eval()
model.load_state_dict(A__ )
_UpperCAmelCase = model(A__ ).model[0]
else:
_UpperCAmelCase = BartForConditionalGeneration(A__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(A__ )
if hasattr(A__ , 'lm_head' ):
_UpperCAmelCase = make_linear_from_emb(model.model.shared )
_UpperCAmelCase = model.model(A__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}" )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`' )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
)
UpperCAmelCase__ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
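# Example invocations (a sketch; assumes this file is saved as
# convert_bart_checkpoint.py -- the argument names match the parser above):
#
#   python convert_bart_checkpoint.py bart.large ./bart-large-converted
#   python convert_bart_checkpoint.py bart_xsum/model.pt ./bart-xsum \
#       --hf_config facebook/bart-large-xsum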
| 714
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase__ = re.compile(r"\s+")
def A ( _UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(_UpperCAmelCase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = [len(_UpperCAmelCase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(_UpperCAmelCase ), "line_max": max(_UpperCAmelCase )}
def A ( _UpperCAmelCase : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict=5 ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = ['auto-generated', 'autogenerated', 'automatically generated']
_UpperCAmelCase = example['content'].splitlines()
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : Optional[int]=0.05 ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['unit tests', 'test file', 'configuration file']
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# first test
for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
_UpperCAmelCase = example['content'].count('\n' )
_UpperCAmelCase = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def A ( _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = ['def ', 'class ', 'for ', 'while ']
_UpperCAmelCase = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any]=4 ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = example['content'].splitlines()
_UpperCAmelCase = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def A ( _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = tokenizer(example['content'] , truncation=_UpperCAmelCase )['input_ids']
_UpperCAmelCase = len(example['content'] ) / len(_UpperCAmelCase )
return {"ratio": ratio}
def A ( _UpperCAmelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = {}
results.update(get_hash(_UpperCAmelCase ) )
results.update(line_stats(_UpperCAmelCase ) )
results.update(alpha_stats(_UpperCAmelCase ) )
results.update(char_token_ratio(_UpperCAmelCase ) )
results.update(is_autogenerated(_UpperCAmelCase ) )
results.update(is_config_or_test(_UpperCAmelCase ) )
results.update(has_no_keywords(_UpperCAmelCase ) )
results.update(has_few_assignments(_UpperCAmelCase ) )
return results
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
if not check_uniques(_UpperCAmelCase , _UpperCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def A ( _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
with open(_UpperCAmelCase , 'rb' ) as f_in:
with gzip.open(str(_UpperCAmelCase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase )
os.unlink(_UpperCAmelCase )
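# Illustrative sketch of the exact-deduplication step used below:
# `ds.unique("hash")` yields each content hash once, and `check_uniques` admits
# only the FIRST row carrying a given hash because it removes the hash on sight:
#
#   uniques = {"h1", "h2"}
#   check_uniques({"hash": "h1"}, uniques)  # -> True, uniques becomes {"h2"}
#   check_uniques({"hash": "h1"}, uniques)  # -> False, duplicate row is dropped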
# Settings
UpperCAmelCase__ = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase__ = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase__ = multiprocessing.cpu_count()
UpperCAmelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
UpperCAmelCase__ = set(ds.unique("hash"))
UpperCAmelCase__ = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
UpperCAmelCase__ = time.time()
UpperCAmelCase__ = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
UpperCAmelCase__ = time.time()
UpperCAmelCase__ , UpperCAmelCase__ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
UpperCAmelCase__ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure this is the right place to save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
UpperCAmelCase__ = output_dir / "data"
data_dir.mkdir(exist_ok=True)
UpperCAmelCase__ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
UpperCAmelCase__ = str(data_dir / f"""file-{file_number+1:012}.json""")
UpperCAmelCase__ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
| 639
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCAmelCase__ = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 715
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "https://openaipublic.azureedge.net/jukebox/models/"
UpperCAmelCase__ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
_UpperCAmelCase = key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
_UpperCAmelCase = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
_UpperCAmelCase = key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_UpperCAmelCase = key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
_UpperCAmelCase = key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def A ( _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
import re
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
_UpperCAmelCase = re.compile(
R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
_UpperCAmelCase = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
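    # Worked example (hypothetical key) for re_encoder_block_conv_in above:
    # "encoders.0.level_blocks.1.model.4.1.bias" yields groups ("0", "1", "4", "1", "bias"),
    # so block_index = 4 * 2 + 1 = 9 and the loop below rewrites it to
    # "encoders.0.level_blocks.1.downsample_block.9.bias".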
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_conv_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_conv_in.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_encoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_encoder_block_proj_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_encoder_block_proj_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_UpperCAmelCase = re_encoder_block_proj_out.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_decoder_block_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_decoder_block_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_decoder_block_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_UpperCAmelCase = re_decoder_block_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_conv_out.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_conv_out.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_resnet.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_resnet.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase = {'1': 1, '3': 2}[groups[-2]]
_UpperCAmelCase = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_UpperCAmelCase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_UpperCAmelCase = prefix + resnet_block
_UpperCAmelCase = re_prior_cond_resnet.sub(_UpperCAmelCase , _UpperCAmelCase )
elif re_prior_cond_proj_in.fullmatch(_UpperCAmelCase ):
_UpperCAmelCase = re_prior_cond_proj_in.match(_UpperCAmelCase )
_UpperCAmelCase = regex_match.groups()
_UpperCAmelCase = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_UpperCAmelCase = re_prior_cond_proj_in.sub(_UpperCAmelCase , _UpperCAmelCase )
# keep original key
else:
_UpperCAmelCase = original_key
_UpperCAmelCase = replace_key(_UpperCAmelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_UpperCAmelCase = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_UpperCAmelCase = original_key
_UpperCAmelCase = original_key
_UpperCAmelCase = value
return new_dict
@torch.no_grad()
def A ( _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict=None ) -> Dict:
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_UpperCAmelCase = requests.get(F"{PREFIX}{file}" , allow_redirects=_UpperCAmelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_UpperCAmelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
_UpperCAmelCase = MODEL_MAPPING[model_name.split('/' )[-1]]
_UpperCAmelCase = JukeboxConfig.from_pretrained(_UpperCAmelCase )
_UpperCAmelCase = JukeboxModel(_UpperCAmelCase )
_UpperCAmelCase = []
_UpperCAmelCase = {}
for i, dict_name in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
_UpperCAmelCase = {}
for k in old_dic.keys():
if k.endswith('.b' ):
_UpperCAmelCase = old_dic[k]
elif k.endswith('.w' ):
_UpperCAmelCase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_UpperCAmelCase = old_dic[k]
else:
_UpperCAmelCase = old_dic[k]
_UpperCAmelCase = 'vqvae' if i == 0 else F"priors.{3 - i}"
_UpperCAmelCase = fix_jukebox_keys(_UpperCAmelCase , model.state_dict() , _UpperCAmelCase , _UpperCAmelCase )
weight_dict.append(_UpperCAmelCase )
_UpperCAmelCase = weight_dict.pop(0 )
model.vqvae.load_state_dict(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCAmelCase )
return weight_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 639
| 0
|
def ugly_numbers(n: int) -> int:
    '''Return the n-th ugly number, i.e. the n-th positive integer whose only prime factors are 2, 3 and 5.'''
    ugly_nums = [1]
    ia, ib, ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1, n):
        next_num = min(next_a, next_b, next_c)
        ugly_nums.append(next_num)
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
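# Sanity check: the sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...,
# so ugly_numbers(10) returns 12.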
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f"""{ugly_numbers(200) = }""")
| 716
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
@staticmethod
def _lowerCamelCase ( *A : Union[str, Any] , **A : List[Any]) -> Union[str, Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@require_torch
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
        # The floating-point scores are so close that we run into floating-point error, so the
        # order is not guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(A) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@require_tf
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@slow
@require_torch
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
| 639
| 0
|
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
UpperCAmelCase__ : List[str] = "scheduler_config.json"
class __lowerCAmelCase ( Enum ):
UpperCamelCase = 1
UpperCamelCase = 2
UpperCamelCase = 3
UpperCamelCase = 4
UpperCamelCase = 5
UpperCamelCase = 6
UpperCamelCase = 7
UpperCamelCase = 8
UpperCamelCase = 9
UpperCamelCase = 1_0
UpperCamelCase = 1_1
UpperCamelCase = 1_2
UpperCamelCase = 1_3
UpperCamelCase = 1_4
@dataclass
class __lowerCAmelCase ( BaseOutput ):
UpperCamelCase = 42
class __lowerCAmelCase :
UpperCamelCase = SCHEDULER_CONFIG_NAME
UpperCamelCase = []
UpperCamelCase = True
@classmethod
def _lowerCamelCase ( cls : str , A : int = None , A : Dict = None , A : Optional[Any]=False , **A : Optional[Any] , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = cls.load_config(
pretrained_model_name_or_path=lowerCAmelCase_ , subfolder=lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ , return_commit_hash=lowerCAmelCase_ , **lowerCAmelCase_ , )
return cls.from_config(lowerCAmelCase_ , return_unused_kwargs=lowerCAmelCase_ , **lowerCAmelCase_)
def _lowerCamelCase ( self : Dict , A : Tuple , A : Dict = False , **A : str) -> List[Any]:
"""simple docstring"""
self.save_config(save_directory=lowerCAmelCase_ , push_to_hub=lowerCAmelCase_ , **lowerCAmelCase_)
@property
def _lowerCamelCase ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def _lowerCamelCase ( cls : int) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = list(set([cls.__name__] + cls._compatibles))
_UpperCAmelCase = importlib.import_module(__name__.split('.')[0])
_UpperCAmelCase = [
getattr(lowerCAmelCase_ , lowerCAmelCase_) for c in compatible_classes_str if hasattr(lowerCAmelCase_ , lowerCAmelCase_)
]
return compatible_classes
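# Illustration (the class name is just an example): if a scheduler subclass lists
# "DDIMScheduler" in its _compatibles attribute, the classmethod above resolves that
# string against the top-level package via importlib, so the property returns the
# actual class objects rather than their names.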
| 717
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 639
| 0
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = '▁'
UpperCAmelCase__ = {'vocab_file': 'prophetnet.tokenizer'}
UpperCAmelCase__ = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
UpperCAmelCase__ = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
UpperCAmelCase__ = {
'microsoft/xprophetnet-large-wiki100-cased': 512,
}
def A ( _UpperCAmelCase : Any ) -> int:
'''simple docstring'''
_UpperCAmelCase = collections.OrderedDict()
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as reader:
_UpperCAmelCase = reader.readlines()
for index, token in enumerate(_UpperCAmelCase ):
_UpperCAmelCase = token.rstrip('\n' )
_UpperCAmelCase = index
return vocab
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['input_ids', 'attention_mask']
def __init__( self : Tuple , A : str , A : Optional[int]="[SEP]" , A : Any="[SEP]" , A : Optional[Any]="[SEP]" , A : int="[UNK]" , A : List[str]="[PAD]" , A : Tuple="[CLS]" , A : List[str]="[MASK]" , A : Optional[Dict[str, Any]] = None , **A : int , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece')
raise
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(UpperCAmelCase__))
_UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
        _UpperCAmelCase = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
for i in range(10):
_UpperCAmelCase = F"[unused{i}]"
_UpperCAmelCase = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
_UpperCAmelCase = 12
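        # Illustrative consequence of the offset: the spm id of "," is 3, and
        # 3 + the fairseq offset (12) = 15, its position in the embedding vocab,
        # matching the alignment table above.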
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(UpperCAmelCase__)
def __getstate__( self : int) -> str:
"""simple docstring"""
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self : Dict , A : Tuple) -> Dict:
"""simple docstring"""
_UpperCAmelCase = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece')
raise
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _lowerCamelCase ( self : Dict , A : List[int] , A : Optional[List[int]] = None , A : bool = False) -> Any:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__)
if token_ids_a is None:
return ([0] * len(UpperCAmelCase__)) + [1]
return ([0] * len(UpperCAmelCase__)) + [1] + ([0] * len(UpperCAmelCase__)) + [1]
def _lowerCamelCase ( self : Optional[int] , A : List[int] , A : Optional[List[int]] = None) -> Any:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def _lowerCamelCase ( self : Any) -> List[str]:
"""simple docstring"""
return len(self.sp_model) + self.fairseq_offset
def _lowerCamelCase ( self : Tuple) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {self.convert_ids_to_tokens(UpperCAmelCase__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _lowerCamelCase ( self : List[str] , A : str) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__)
def _lowerCamelCase ( self : Optional[Any] , A : Optional[Any]) -> Any:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase = self.sp_model.PieceToId(UpperCAmelCase__)
# Need to return unknown token if the SP model returned 0
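        # (e.g. a piece absent from the spm vocab makes PieceToId return 0, which is falsy,
        # so the expression below falls back to unk_token_id)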
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowerCamelCase ( self : int , A : int) -> int:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def _lowerCamelCase ( self : Optional[Any] , A : Dict) -> Optional[Any]:
"""simple docstring"""
        _UpperCAmelCase = ''.join(UpperCAmelCase__).replace(UpperCAmelCase__ , ' ').strip()
return out_string
def _lowerCamelCase ( self : int , A : str , A : Optional[str] = None) -> Optional[int]:
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
UpperCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCAmelCase__)
elif not os.path.isfile(self.vocab_file):
with open(UpperCAmelCase__ , 'wb') as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__)
return (out_vocab_file,)
def _lowerCamelCase ( self : int , A : List[int] , A : Optional[List[int]] = None) -> Tuple:
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
_UpperCAmelCase = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 718
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def A ( _UpperCAmelCase : List[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase = filter(lambda _UpperCAmelCase : p.requires_grad , model.parameters() )
_UpperCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
UpperCAmelCase__ = logging.getLogger(__name__)
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
if metric == "rouge2":
_UpperCAmelCase = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
_UpperCAmelCase = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
_UpperCAmelCase = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
_UpperCAmelCase = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
' function.' )
_UpperCAmelCase = ModelCheckpoint(
dirpath=_UpperCAmelCase , filename=_UpperCAmelCase , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
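# For example, with metric="rouge2" the callback above keeps only the single best checkpoint
# (save_top_k=1, mode="max", monitoring "val_rouge2"), with the filename rendered from the
# template "{val_avg_rouge2:.4f}-{step_count}" at save time.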
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
return EarlyStopping(
monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=_UpperCAmelCase , verbose=_UpperCAmelCase , )
class __lowerCAmelCase ( pl.Callback ):
def _lowerCamelCase ( self : Optional[int] , A : List[Any] , A : int) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
pl_module.logger.log_metrics(A)
@rank_zero_only
def _lowerCamelCase ( self : Optional[Any] , A : pl.Trainer , A : pl.LightningModule , A : str , A : int=True) -> None:
"""simple docstring"""
logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir)
if type_path == "test":
_UpperCAmelCase = od / 'test_results.txt'
_UpperCAmelCase = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
_UpperCAmelCase = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
results_file.parent.mkdir(exist_ok=A)
generations_file.parent.mkdir(exist_ok=A)
with open(A , 'a+') as writer:
for key in sorted(A):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
if isinstance(A , torch.Tensor):
_UpperCAmelCase = val.item()
_UpperCAmelCase = F"{key}: {val:.6f}\n"
writer.write(A)
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = '\n'.join(metrics['preds'])
generations_file.open('w+').write(A)
@rank_zero_only
def _lowerCamelCase ( self : str , A : Optional[int] , A : List[str]) -> Optional[Any]:
"""simple docstring"""
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
_UpperCAmelCase = count_trainable_parameters(A)
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
def _lowerCamelCase ( self : Dict , A : pl.Trainer , A : pl.LightningModule) -> int:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
return self._write_logs(A , A , 'test')
@rank_zero_only
def _lowerCamelCase ( self : Tuple , A : pl.Trainer , A : str) -> Dict:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 639
| 0
|
import string
import numpy
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any ) -> int:
'''simple docstring'''
return b if a == 0 else greatest_common_divisor(b % a , _UpperCAmelCase )
class __lowerCAmelCase :
UpperCamelCase = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
    UpperCamelCase = numpy.vectorize(lambda x : x % 3_6 )
    # element-wise rounding to integers (assumed to be `round`, given its use in make_decrypt_key below)
    UpperCamelCase = numpy.vectorize(round)
def __init__( self : Dict , A : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.modulus(snake_case__) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
_UpperCAmelCase = encrypt_key.shape[0]
def _lowerCamelCase ( self : Union[str, Any] , A : str) -> List[Any]:
"""simple docstring"""
return self.key_string.index(snake_case__)
def _lowerCamelCase ( self : Tuple , A : Optional[int]) -> str:
"""simple docstring"""
return self.key_string[round(snake_case__)]
def _lowerCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = round(numpy.linalg.det(self.encrypt_key))
if det < 0:
_UpperCAmelCase = det % len(self.key_string)
_UpperCAmelCase = len(self.key_string)
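        # e.g. a key with determinant 10 is rejected below, since gcd(10, 36) == 2 != 1,
        # so 10 has no inverse modulo 36 and decryption would be impossible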
if greatest_common_divisor(snake_case__ , len(self.key_string)) != 1:
_UpperCAmelCase = (
F"determinant modular {req_l} of encryption key({det}) "
F"is not co prime w.r.t {req_l}.\nTry another key."
)
raise ValueError(snake_case__)
def _lowerCamelCase ( self : Tuple , A : Optional[int]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = [char for char in text.upper() if char in self.key_string]
_UpperCAmelCase = chars[-1]
while len(snake_case__) % self.break_key != 0:
chars.append(snake_case__)
return "".join(snake_case__)
def _lowerCamelCase ( self : Any , A : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.process_text(text.upper())
_UpperCAmelCase = ""
for i in range(0 , len(snake_case__) - self.break_key + 1 , self.break_key):
_UpperCAmelCase = text[i : i + self.break_key]
_UpperCAmelCase = [self.replace_letters(snake_case__) for char in batch]
_UpperCAmelCase = numpy.array([vec]).T
_UpperCAmelCase = self.modulus(self.encrypt_key.dot(snake_case__)).T.tolist()[
0
]
_UpperCAmelCase = "".join(
self.replace_digits(snake_case__) for num in batch_encrypted)
encrypted += encrypted_batch
return encrypted
def _lowerCamelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = round(numpy.linalg.det(self.encrypt_key))
if det < 0:
_UpperCAmelCase = det % len(self.key_string)
_UpperCAmelCase = None
for i in range(len(self.key_string)):
if (det * i) % len(self.key_string) == 1:
_UpperCAmelCase = i
break
_UpperCAmelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key)
* numpy.linalg.inv(self.encrypt_key)
)
return self.to_int(self.modulus(snake_case__))
def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.make_decrypt_key()
_UpperCAmelCase = self.process_text(text.upper())
_UpperCAmelCase = ""
for i in range(0 , len(snake_case__) - self.break_key + 1 , self.break_key):
_UpperCAmelCase = text[i : i + self.break_key]
_UpperCAmelCase = [self.replace_letters(snake_case__) for char in batch]
_UpperCAmelCase = numpy.array([vec]).T
_UpperCAmelCase = self.modulus(decrypt_key.dot(snake_case__)).T.tolist()[0]
_UpperCAmelCase = "".join(
self.replace_digits(snake_case__) for num in batch_decrypted)
decrypted += decrypted_batch
return decrypted
def A ( ) -> None:
'''simple docstring'''
_UpperCAmelCase = int(input('Enter the order of the encryption key: ' ) )
_UpperCAmelCase = []
print('Enter each row of the encryption key with space separated integers' )
for _ in range(_UpperCAmelCase ):
_UpperCAmelCase = [int(_UpperCAmelCase ) for x in input().split()]
hill_matrix.append(_UpperCAmelCase )
_UpperCAmelCase = HillCipher(numpy.array(_UpperCAmelCase ) )
print('Would you like to encrypt or decrypt some text? (1 or 2)' )
_UpperCAmelCase = input('\n1. Encrypt\n2. Decrypt\n' )
if option == "1":
_UpperCAmelCase = input('What text would you like to encrypt?: ' )
print('Your encrypted text is:' )
print(hc.encrypt(_UpperCAmelCase ) )
elif option == "2":
_UpperCAmelCase = input('What text would you like to decrypt?: ' )
print('Your decrypted text is:' )
print(hc.decrypt(_UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
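# A minimal non-interactive usage sketch (the 2x2 key below is just an example;
# its determinant 7 is coprime with 36, so it passes check_determinant):
#   hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#   cipher_text = hc.encrypt('testmessage')
#   assert hc.decrypt(cipher_text) == hc.process_text('testmessage')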
| 719
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = MgpstrTokenizer
UpperCamelCase = False
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
def _lowerCamelCase ( self : Dict , **A : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'tester'
_UpperCAmelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
_UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=A)
self.assertEqual(len(A) , 1)
_UpperCAmelCase = tokenizer.decode(A , skip_special_tokens=A)
self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A)
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertNotEqual(len(A) , 0)
_UpperCAmelCase = tokenizer.decode(A)
self.assertIsInstance(A , A)
self.assertEqual(text_a.replace(' ' , '') , A)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
| 639
| 0
|
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger("transformers.models.speecht5")
UpperCAmelCase__ = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
UpperCAmelCase__ = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
UpperCAmelCase__ = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
UpperCAmelCase__ = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
UpperCAmelCase__ = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
UpperCAmelCase__ = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
UpperCAmelCase__ = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
UpperCAmelCase__ = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
UpperCAmelCase__ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
UpperCAmelCase__ = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
UpperCAmelCase__ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
UpperCAmelCase__ = []
UpperCAmelCase__ = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
UpperCAmelCase__ = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
UpperCAmelCase__ = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
UpperCAmelCase__ = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('.' ):
_UpperCAmelCase = getattr(__a , __a )
if weight_type is not None:
_UpperCAmelCase = getattr(__a , __a ).shape
else:
_UpperCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
_UpperCAmelCase = value
elif weight_type == "weight_g":
_UpperCAmelCase = value
elif weight_type == "weight_v":
_UpperCAmelCase = value
elif weight_type == "bias":
_UpperCAmelCase = value
elif weight_type == "running_mean":
_UpperCAmelCase = value
elif weight_type == "running_var":
_UpperCAmelCase = value
elif weight_type == "num_batches_tracked":
_UpperCAmelCase = value
else:
_UpperCAmelCase = value
logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Any ) -> List[str]:
'''simple docstring'''
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_UpperCAmelCase , _UpperCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
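# For example, "speech_encoder_prenet.embed_positions._float_tensor" is ignored through the
# plain-substring branch (it appears verbatim in IGNORE_KEYS), while a pattern such as
# "text_decoder_prenet.*" ignores any name starting with "text_decoder_prenet." via the
# trailing-wildcard branch.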
def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = []
if task == "s2t":
_UpperCAmelCase = hf_model.speechta.encoder.prenet.feature_encoder
_UpperCAmelCase = MAPPING_S2T
_UpperCAmelCase = IGNORE_KEYS_S2T
elif task == "t2s":
_UpperCAmelCase = None
_UpperCAmelCase = MAPPING_T2S
_UpperCAmelCase = IGNORE_KEYS_T2S
elif task == "s2s":
_UpperCAmelCase = hf_model.speechta.encoder.prenet.feature_encoder
_UpperCAmelCase = MAPPING_S2S
_UpperCAmelCase = IGNORE_KEYS_S2S
else:
raise ValueError(F"Unsupported task: {task}" )
for name, value in fairseq_dict.items():
if should_ignore(__a , __a ):
logger.info(F"{name} was ignored" )
continue
_UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == 'group' , )
_UpperCAmelCase = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
_UpperCAmelCase , _UpperCAmelCase = key.split('.*.' )
if prefix in name and suffix in name:
_UpperCAmelCase = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
_UpperCAmelCase = True
if "*" in mapped_key:
_UpperCAmelCase = name.split(__a )[0].split('.' )[-2]
_UpperCAmelCase = mapped_key.replace('*' , __a )
if "weight_g" in name:
_UpperCAmelCase = 'weight_g'
elif "weight_v" in name:
_UpperCAmelCase = 'weight_v'
elif "bias" in name:
_UpperCAmelCase = 'bias'
elif "weight" in name:
_UpperCAmelCase = 'weight'
elif "running_mean" in name:
_UpperCAmelCase = 'running_mean'
elif "running_var" in name:
_UpperCAmelCase = 'running_var'
elif "num_batches_tracked" in name:
_UpperCAmelCase = 'num_batches_tracked'
else:
_UpperCAmelCase = None
set_recursively(__a , __a , __a , __a , __a )
continue
if not is_used:
unused_weights.append(__a )
logger.warning(F"Unused weights: {unused_weights}" )
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = full_name.split('conv_layers.' )[-1]
_UpperCAmelCase = name.split('.' )
_UpperCAmelCase = int(items[0] )
_UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_UpperCAmelCase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_UpperCAmelCase = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
_UpperCAmelCase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
_UpperCAmelCase = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(__a )
@torch.no_grad()
def A ( _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Union[str, Any]=None , ) -> str:
'''simple docstring'''
if config_path is not None:
_UpperCAmelCase = SpeechTaConfig.from_pretrained(__a )
else:
_UpperCAmelCase = SpeechTaConfig()
if task == "s2t":
_UpperCAmelCase = config.max_text_positions
_UpperCAmelCase = SpeechTaForSpeechToText(__a )
elif task == "t2s":
_UpperCAmelCase = 1_876
_UpperCAmelCase = 600
_UpperCAmelCase = config.max_speech_positions
_UpperCAmelCase = SpeechTaForTextToSpeech(__a )
elif task == "s2s":
_UpperCAmelCase = 1_876
_UpperCAmelCase = config.max_speech_positions
_UpperCAmelCase = SpeechTaForSpeechToSpeech(__a )
else:
raise ValueError(F"Unknown task name: {task}" )
if vocab_path:
_UpperCAmelCase = SpeechTaTokenizer(__a , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
_UpperCAmelCase = AddedToken('<mask>' , lstrip=__a , rstrip=__a )
_UpperCAmelCase = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
_UpperCAmelCase = SpeechTaFeatureExtractor()
_UpperCAmelCase = SpeechTaProcessor(tokenizer=__a , feature_extractor=__a )
processor.save_pretrained(__a )
_UpperCAmelCase = torch.load(__a )
recursively_load_weights(fairseq_checkpoint['model'] , __a , __a )
model.save_pretrained(__a )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(__a )
model.push_to_hub(__a )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
UpperCAmelCase__ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 720
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase__ = {"facebook/bart-base": BartTokenizer}
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
_UpperCAmelCase = parser.parse_args()
return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_UpperCAmelCase ) )
with torch.no_grad():
_UpperCAmelCase = 'My friends are cool but they eat too many carbs.'
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_UpperCAmelCase = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=_UpperCAmelCase , max_length=_UpperCAmelCase , early_stopping=_UpperCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_UpperCAmelCase , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _UpperCAmelCase , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=_UpperCAmelCase , )
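    # The dynamic_axes above let batch size and sequence length vary at inference time,
    # so the exported graph accepts e.g. (1, 13)-shaped input_ids as well as (4, 128).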
logger.info('Model exported to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_UpperCAmelCase ) )
logger.info('Deduplicated and optimized model written to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_UpperCAmelCase )
_UpperCAmelCase = ort_sess.run(
_UpperCAmelCase , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(_UpperCAmelCase ),
'max_length': np.array(_UpperCAmelCase ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def main() -> None:
    """Entry point: parse arguments, load the model, and export it to ONNX."""
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path , device)
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = 'BART.onnx'

    logger.info('Exporting model to ONNX')
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length)
if __name__ == "__main__":
main()
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()

DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
def gnome_sort(lst: list) -> list:
    """Sort a mutable collection in ascending order using gnome sort."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
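# Example run: gnome_sort([5, 3, 4]) swaps to [3, 5, 4], steps back, then swaps
# again to reach [3, 4, 5] -- the "gnome" moves backward after every swap and
# forward otherwise, giving O(n^2) worst-case time with O(1) extra space.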
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by a comma:\n").strip()
UpperCAmelCase__ = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_cli.py'])

    base_cmd = ['accelerate', 'launch']
    config_folder = Path.home() / '.cache/huggingface/accelerate'
    config_file = 'default_config.yaml'
    config_path = config_folder / config_file
    changed_path = config_folder / '_default_config.yaml'

    test_config_path = Path('tests/test_configs')

    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob('**/*.yaml')):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ['--config_file', str(config), self.test_file_path] , env=os.environ.copy())

    def test_accelerate_test(self):
        execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    tpu_name = 'test-tpu'
    tpu_zone = 'us-central1-a'
    command = 'ls'
    cmd = ['accelerate', 'tpu-config']
    base_output = 'cd /usr/share'
    command_file = 'tests/test_samples/test_command_file.sh'
    gcloud = 'Running gcloud compute tpus tpu-vm ssh'
def _lowerCamelCase ( self : Optional[int]) -> str:
"""simple docstring"""
_UpperCAmelCase = run_command(
self.cmd
+ ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , UpperCAmelCase__ , )
def _lowerCamelCase ( self : Dict) -> str:
"""simple docstring"""
_UpperCAmelCase = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command',
self.command,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , UpperCAmelCase__ , )
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=UpperCAmelCase__)
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , UpperCAmelCase__ , )
def _lowerCamelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCAmelCase = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , UpperCAmelCase__ , )
def _lowerCamelCase ( self : Dict) -> Any:
"""simple docstring"""
_UpperCAmelCase = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--command',
self.command,
'--command',
'echo "Hello World"',
'--debug',
] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all" , UpperCAmelCase__ , )
def _lowerCamelCase ( self : List[str]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = run_command(
self.cmd
+ ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , UpperCAmelCase__ , )
def _lowerCamelCase ( self : List[str]) -> int:
"""simple docstring"""
_UpperCAmelCase = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/0_12_0.yaml',
'--command_file',
self.command_file,
'--tpu_zone',
self.tpu_zone,
'--tpu_name',
self.tpu_name,
'--debug',
] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , UpperCAmelCase__ , )
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = run_command(
self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all" , UpperCAmelCase__ , )
def _lowerCamelCase ( self : Tuple) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = run_command(
self.cmd
+ [
'--config_file',
'tests/test_configs/latest.yaml',
'--install_accelerate',
'--accelerate_version',
'12.0.0',
'--debug',
] , return_stdout=UpperCAmelCase__ , )
self.assertIn(
F"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all" , UpperCAmelCase__ , )
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer) and not line.startswith(' ') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )


def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
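# Example: with pad_token_id = 0, trim_batch(torch.tensor([[5, 6, 0], [7, 0, 0]]), 0)
# drops the all-pad third column and returns tensor([[5, 6], [7, 0]]).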
class Seq2SeqDataset(Dataset):
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + '.source')
        self.tgt_file = Path(data_dir).joinpath(type_path + '.target')
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__( self):
        return len(self.src_lens)

    def __getitem__( self , index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file) , index).rstrip('\n')
        tgt_line = linecache.getline(str(self.tgt_file) , index).rstrip('\n')
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , 'right')
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , 'right')

        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn( self , batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x['input_ids'] for x in batch])
        masks = torch.stack([x['attention_mask'] for x in batch])
        target_ids = torch.stack([x['decoder_input_ids'] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks)
        batch = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , 'git_log.json'))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path , 'w') as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info() -> dict:
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
        'hostname': str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))."""
    return list(map(f , x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)."""
    with open(path , 'wb') as f:
        return pickle.dump(obj , f)


def normalize_answer(s: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(R'\b(a|an|the)\b' , ' ' , text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return ''.join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
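# Worked example: f1_score("new york city", "york city") shares two tokens, so
# precision = 2/3, recall = 2/2 = 1.0 and F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8.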
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns , reference_lns):
        em += exact_match_score(hypo , pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith('rag')


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None):
            if not hasattr(config , p) and not hasattr(config , equivalent_param[p]):
                logger.info('config doesn\'t have a `{}` attribute'.format(p))
                delattr(hparams , p)
                continue
            set_p = p if hasattr(config , p) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p))
            delattr(hparams , p)
    return hparams, config
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if 'large' in model_name or 'huge' in model_name else False
    use_post_layernorm = True if 'large' in model_name or 'huge' in model_name else False
    use_layerscale = True if 'large' in model_name or 'huge' in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = 'huggingface/label-files'
    if "large" in model_name or "huge" in model_name:
        filename = 'imagenet-22k-id2label.json'
    else:
        filename = 'imagenet-1k-id2label.json'

    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset') , 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "encoder.layers" in name:
        name = name.replace('encoder.layers' , 'encoder.stages')
    if "downsample.proj" in name:
        name = name.replace('downsample.proj' , 'downsample.projection')
    if "blocks" in name:
        name = name.replace('blocks' , 'layers')
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('modulation.f' , 'modulation.projection_in')
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('modulation.h' , 'modulation.projection_context')
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('modulation.proj' , 'modulation.projection_out')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head' , 'classifier')
    else:
        name = 'focalnet.' + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the checkpoint's weights into the FocalNet structure and verify the outputs."""
    # fmt: off
_UpperCAmelCase = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('Checkpoint URL: ' , checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu')['model']

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    processor = BitImageProcessor(
        do_resize=True , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True).raw)
    inputs = processor(images=image , return_tensors='pt')

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225]),
        ])

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1E-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print('Predicted class:' , model.config.id2label[predicted_class_idx])

    print('First values of logits:' , outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4)
    print('Looks ok!')

    if pytorch_dump_folder_path is not None:
        print(F"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(F"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(F"{model_name}")
        processor.push_to_hub(F"{model_name}")
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="focalnet-tiny",
type=str,
help="Name of the FocalNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub.",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations.

    >>> add(3, 5)
    8
    >>> add(13, 5)
    18
    """
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
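# Trace of add(3, 5): carry = 3 & 5 = 1, first = 3 ^ 5 = 6, second = 1 << 1 = 2;
# next carry = 6 & 2 = 2, first = 4, second = 4; next first = 0, second = 8;
# finally first = 8, second = 0 and the loop ends. A negative `second` would
# never reach 0 with Python's unbounded integers, hence the non-negative
# restriction in the docstring.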
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ = int(input("Enter the first number: ").strip())
UpperCAmelCase__ = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
BPE_TOKEN_MERGES = '</w>'
BPE_TOKEN_VOCAB = '@@ '
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where the word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
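# Example: get_pairs(("h", "e", "l", "l", "o")) ->
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}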
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/s2t-wav2vec2-large-en-de': 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__( self , vocab_file , bos_token="<s>" , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , do_lower_case=False , merges_file=None , **kwargs , ):
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
        self.do_lower_case = do_lower_case

        with open(vocab_file , encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding='utf-8') as merges_handle:
                merges = merges_handle.read().split('\n')[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges , range(len(merges))))
            self.cache = {}
@property
def _lowerCamelCase ( self : List[Any]) -> int:
"""simple docstring"""
return len(self.decoder)
def _lowerCamelCase ( self : Optional[int]) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
def _lowerCamelCase ( self : str , A : Tuple) -> Any:
"""simple docstring"""
_UpperCAmelCase = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_UpperCAmelCase = get_pairs(__lowerCamelCase)
if not pairs:
return token
while True:
_UpperCAmelCase = min(__lowerCamelCase , key=lambda A: self.bpe_ranks.get(__lowerCamelCase , float('inf')))
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase = bigram
_UpperCAmelCase = []
_UpperCAmelCase = 0
while i < len(__lowerCamelCase):
try:
_UpperCAmelCase = word.index(__lowerCamelCase , __lowerCamelCase)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
_UpperCAmelCase = j
if word[i] == first and i < len(__lowerCamelCase) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
_UpperCAmelCase = tuple(__lowerCamelCase)
_UpperCAmelCase = new_word
if len(__lowerCamelCase) == 1:
break
else:
_UpperCAmelCase = get_pairs(__lowerCamelCase)
_UpperCAmelCase = ''' '''.join(__lowerCamelCase)
if word == "\n " + BPE_TOKEN_MERGES:
_UpperCAmelCase = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(__lowerCamelCase):
_UpperCAmelCase = word.replace(__lowerCamelCase , '')
_UpperCAmelCase = word.replace(' ' , __lowerCamelCase)
_UpperCAmelCase = word
return word
def _lowerCamelCase ( self : Union[str, Any] , A : str) -> int:
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.')
if self.do_lower_case:
_UpperCAmelCase = text.lower()
_UpperCAmelCase = text.split()
_UpperCAmelCase = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCamelCase).split(' ')))
return split_tokens
def _lowerCamelCase ( self : Union[str, Any] , A : Optional[int]) -> int:
"""simple docstring"""
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token))
def _lowerCamelCase ( self : str , A : List[Any]) -> str:
"""simple docstring"""
_UpperCAmelCase = self.decoder.get(__lowerCamelCase , self.unk_token)
return result
def _lowerCamelCase ( self : Dict , A : Optional[Any]) -> str:
"""simple docstring"""
_UpperCAmelCase = ''' '''.join(__lowerCamelCase)
# make sure @@ tokens are concatenated
_UpperCAmelCase = ''''''.join(string.split(__lowerCamelCase))
return string
def _lowerCamelCase ( self : int , A : int , A : Any = None) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(__lowerCamelCase):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
_UpperCAmelCase = os.path.join(
__lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
_UpperCAmelCase = os.path.join(
__lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(__lowerCamelCase , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase) + '\n')
_UpperCAmelCase = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCamelCase , 'w' , encoding='utf-8') as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!')
_UpperCAmelCase = token_index
writer.write(' '.join(__lowerCamelCase) + '\n')
index += 1
return (vocab_file, merges_file)
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` (a string in `variable`) starting from
    `starting_point`, using the (modified) Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x , function)
    diff_function = lambdify(x , diff(function , x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError('Could not find root') from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
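# For a root of multiplicity m > 1, plain Newton iteration only converges
# linearly; passing `multiplicity=m` uses the modified update
# x_{n+1} = x_n - m * f(x_n) / f'(x_n), which restores quadratic convergence.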
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
import itertools
import os
import re
UpperCAmelCase__ = re.compile(r"([A-Z]+)([A-Z][a-z])")
UpperCAmelCase__ = re.compile(r"([a-z\d])([A-Z])")
UpperCAmelCase__ = re.compile(r"(?<!_)_(?!_)")
UpperCAmelCase__ = re.compile(r"(_{2,})")
UpperCAmelCase__ = R"^\w+(\.\w+)*$"
UpperCAmelCase__ = R"<>:/\|?*"
def A ( _UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = _uppercase_uppercase_re.sub(R'\1_\2' , UpperCamelCase__ )
_UpperCAmelCase = _lowercase_uppercase_re.sub(R'\1_\2' , UpperCamelCase__ )
return name.lower()
def A ( _UpperCAmelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = _single_underscore_re.split(UpperCamelCase__ )
_UpperCAmelCase = [_multiple_underscores_re.split(UpperCamelCase__ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(UpperCamelCase__ ) if n != '' )
def A ( _UpperCAmelCase : Optional[Any] ) -> List[str]:
'''simple docstring'''
if os.path.basename(UpperCamelCase__ ) != name:
raise ValueError(F"Should be a dataset name, not a path: {name}" )
return camelcase_to_snakecase(UpperCamelCase__ )
def A ( _UpperCAmelCase : Any , _UpperCAmelCase : Tuple ) -> Optional[int]:
'''simple docstring'''
if os.path.basename(UpperCamelCase__ ) != name:
raise ValueError(F"Should be a dataset name, not a path: {name}" )
if not re.match(_split_re , UpperCamelCase__ ):
raise ValueError(F"Split name should match \'{_split_re}\'\' but got \'{split}\'." )
return F"{filename_prefix_for_name(UpperCamelCase__ )}-{split}"
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int]=None ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = filename_prefix_for_split(UpperCamelCase__ , UpperCamelCase__ )
if filetype_suffix:
prefix += F".{filetype_suffix}"
_UpperCAmelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
return F"{filepath}*"
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : int=None , _UpperCAmelCase : Tuple=None ) -> List[Any]:
'''simple docstring'''
_UpperCAmelCase = filename_prefix_for_split(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
if shard_lengths:
_UpperCAmelCase = len(UpperCamelCase__ )
_UpperCAmelCase = [F"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(UpperCamelCase__ )]
if filetype_suffix:
_UpperCAmelCase = [filename + F".{filetype_suffix}" for filename in filenames]
return filenames
else:
_UpperCAmelCase = prefix
if filetype_suffix:
filename += F".{filetype_suffix}"
return [filename]
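# Example: filenames_for_dataset_split("/data", "squad", "train",
# filetype_suffix="arrow", shard_lengths=[100, 100]) returns
# ["/data/squad-train-00000-of-00002.arrow", "/data/squad-train-00001-of-00002.arrow"].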
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class __lowerCAmelCase ( BaseImageProcessor ):
    model_input_names = ['pixel_values']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A) != len(A):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["CLIPFeatureExtractor"]
UpperCAmelCase__ = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from knapsack import knapsack as k
class __lowerCAmelCase ( unittest.TestCase ):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 220)
if __name__ == "__main__":
unittest.main()
def partition(m: int) -> int:
    """Count the partitions of m, i.e. the ways to write m as a sum of positive integers."""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1 , m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
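# Example: partition(5) == 7, counting 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1
# and 1+1+1+1+1.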
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
UpperCAmelCase__ = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print("Please enter a number.")
else:
try:
        n = int(sys.argv[1])
print(partition(n))
except ValueError:
print("Please pass a number.")
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Prepare the two-qubit state |11> and measure it, returning the counts."""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
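# Because both qubits are flipped to |1> before measurement, all 1_000 shots
# should land in the '11' bin, so the printed counts look like {'11': 1000}.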
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class __lowerCAmelCase ( BaseImageProcessor ):
    model_input_names = ['pixel_values']

    def __init__( self , do_resize: bool = True , size_divisor: int = 32 , resample=PILImageResampling.BILINEAR , do_rescale: bool = True , **kwargs , ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize( self , image: np.ndarray , size_divisor: int , resample , data_format: Optional[ChannelDimension] = None , **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs)
        return image
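    # Example: with size_divisor=32, a 513x769 input is resized to 512x768,
    # since 513 // 32 * 32 == 512 and 769 // 32 * 32 == 768.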
    def rescale( self , image: np.ndarray , scale: float , data_format: Optional[ChannelDimension] = None , **kwargs) -> np.ndarray:
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs)

    def preprocess( self , images , do_resize: Optional[bool] = None , size_divisor: Optional[int] = None , resample=None , do_rescale: Optional[bool] = None , return_tensors: Optional[Union[TensorType, str]] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError('Invalid image(s)')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image , size_divisor=size_divisor , resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image , scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image , data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors)
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
# Initialise PyTorch model
    _UpperCAmelCase = TaConfig.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    _UpperCAmelCase = TaForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
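# Hedged usage sketch: an example invocation (script name and paths are
# hypothetical; the flags are the ones the argparse setup above defines):
#   python convert_t5_tf_checkpoint.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/pytorch_model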
| 639
| 0
|
import random
def A ( _UpperCAmelCase : list , _UpperCAmelCase : Optional[Any] ) -> Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = [], [], []
for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
return less, equal, greater
def A ( _UpperCAmelCase : list , _UpperCAmelCase : int ) -> Dict:
'''simple docstring'''
    if index >= len(items ) or index < 0:
        return None
    _UpperCAmelCase = items[random.randint(0 , len(items ) - 1 )]
    _UpperCAmelCase = 0
    _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = _partition(items , pivot )
    _UpperCAmelCase = len(equal )
    _UpperCAmelCase = len(smaller )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller , index )
    # must be in larger
    else:
        return quick_select(larger , index - (m + count) )
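# Hedged usage sketch (assumed original signature: quick_select(items, index)
# returns the index-th smallest element, 0-based, in expected O(n) time):
# >>> quick_select([2, 4, 5, 7, 899, 54, 32], 2)
# 5
# since sorted([2, 4, 5, 7, 899, 54, 32]) == [2, 4, 5, 7, 32, 54, 899].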
| 707
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
        if str(device).startswith('mps'):
            _UpperCAmelCase = torch.manual_seed(seed)
        else:
            _UpperCAmelCase = torch.Generator(device=device).manual_seed(seed)
        _UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed)).to(device)
        _UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed)).to(device)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 639
| 0
|
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = [1]
    for i in range(2 , n ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
_UpperCAmelCase = []
    _UpperCAmelCase = list(range(n ) )
# Find permutation
while factorials:
_UpperCAmelCase = factorials.pop()
        _UpperCAmelCase , _UpperCAmelCase = divmod(k , factorial )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
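# Hedged walk-through of the factorial-number-system decoding above
# (assumed original signature: kth_permutation(k, n) with 0-based k).
# For n = 3, k = 3 the lexicographic permutations of [0, 1, 2] are
# 012, 021, 102, 120, 201, 210, so the expected result is [1, 2, 0]:
#   factorials = [1, 2]; divmod(3, 2) = (1, 1) -> take elements[1] = 1
#   divmod(1, 1) = (1, 0)                      -> take elements[1] = 2 (from [0, 2])
#   last remaining element                     -> 0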
| 708
|
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A ( ) -> List[str]:
'''simple docstring'''
    _UpperCAmelCase = os.path.dirname(os.path.realpath(__file__ ) )
_UpperCAmelCase = os.path.join(_UpperCAmelCase , 'words.txt' )
_UpperCAmelCase = ''
with open(_UpperCAmelCase ) as f:
_UpperCAmelCase = f.readline()
_UpperCAmelCase = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
_UpperCAmelCase = [
word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
    return len(words )
if __name__ == "__main__":
print(solution())
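# A standalone spot-check (not from the source) of the word-value rule used
# above: "SKY" scores 19 + 11 + 25 = 55, the 10th triangular number, so it
# counts as a triangle word.
assert sum(ord(ch) - 64 for ch in "SKY") == 55
assert 55 == 10 * (10 + 1) // 2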
| 639
| 0
|
from math import sqrt
def A ( _UpperCAmelCase : int ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
total += i
return total - n
def A ( _UpperCAmelCase : int = 10_000 ) -> int:
'''simple docstring'''
_UpperCAmelCase = sum(
i
        for i in range(1 , n )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
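# A standalone spot-check (not from the source) of the amicable criterion in
# solution(): 220 and 284 are the classic amicable pair, so both survive the
# filter d(d(i)) == i and d(i) != i.
assert sum(d for d in range(1, 220) if 220 % d == 0) == 284
assert sum(d for d in range(1, 284) if 284 % d == 0) == 220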
| 709
|
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
    _UpperCAmelCase = str(bin(a ) )[2:]  # remove the leading "0b"
    _UpperCAmelCase = str(bin(b ) )[2:]  # remove the leading "0b"
    _UpperCAmelCase = max(len(a_binary ) , len(b_binary ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_UpperCAmelCase ) , b_binary.zfill(_UpperCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
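# Hedged usage sketch (assumed original name: binary_xor):
# >>> binary_xor(25, 32)
# '0b111001'
# 25 = 0b011001 and 32 = 0b100000 once zero-filled to equal width;
# XOR-ing bit by bit yields 0b111001 (decimal 57).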
| 639
| 0
|
import argparse
import struct
import unittest
class __lowerCAmelCase :
def __init__( self : Optional[Any] , A : Optional[int]) -> None:
"""simple docstring"""
_UpperCAmelCase = data
# Initialize hash values
_UpperCAmelCase = [
0X6a09e667,
0Xbb67ae85,
0X3c6ef372,
0Xa54ff53a,
0X510e527f,
0X9b05688c,
0X1f83d9ab,
0X5be0cd19,
]
# Initialize round constants
_UpperCAmelCase = [
0X428a2f98,
0X71374491,
0Xb5c0fbcf,
0Xe9b5dba5,
0X3956c25b,
0X59f111f1,
0X923f82a4,
0Xab1c5ed5,
0Xd807aa98,
0X12835b01,
0X243185be,
0X550c7dc3,
0X72be5d74,
0X80deb1fe,
0X9bdc06a7,
0Xc19bf174,
0Xe49b69c1,
0Xefbe4786,
0X0fc19dc6,
0X240ca1cc,
0X2de92c6f,
0X4a7484aa,
0X5cb0a9dc,
0X76f988da,
0X983e5152,
0Xa831c66d,
0Xb00327c8,
0Xbf597fc7,
0Xc6e00bf3,
0Xd5a79147,
0X06ca6351,
0X14292967,
0X27b70a85,
0X2e1b2138,
0X4d2c6dfc,
0X53380d13,
0X650a7354,
0X766a0abb,
0X81c2c92e,
0X92722c85,
0Xa2bfe8a1,
0Xa81a664b,
0Xc24b8b70,
0Xc76c51a3,
0Xd192e819,
0Xd6990624,
0Xf40e3585,
0X106aa070,
0X19a4c116,
0X1e376c08,
0X2748774c,
0X34b0bcb5,
0X391c0cb3,
0X4ed8aa4a,
0X5b9cca4f,
0X682e6ff3,
0X748f82ee,
0X78a5636f,
0X84c87814,
0X8cc70208,
0X90befffa,
0Xa4506ceb,
0Xbef9a3f7,
0Xc67178f2,
]
_UpperCAmelCase = self.preprocessing(self.data)
self.final_hash()
@staticmethod
    def preprocessing ( data : bytes) -> bytes:
        """simple docstring"""
        _UpperCAmelCase = B'\x80' + (B'\x00' * (63 - (len(data) + 8) % 64))
        _UpperCAmelCase = struct.pack('>Q' , (len(data) * 8))
return data + padding + big_endian_integer
    def final_hash ( self : Optional[Any]) -> None:
        """simple docstring"""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 , len(self.preprocessed_data) , 64)
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack('>16L' , block))
            # add 48 0-ed integers
            words += [0] * 48
            a , b , c , d , e , f , g , h = self.hashes
            for index in range(0 , 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    sa = (
                        self.ror(words[index - 15] , 7)
                        ^ self.ror(words[index - 15] , 18)
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2] , 17)
                        ^ self.ror(words[index - 2] , 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0X100000000
                # Compression
                sa = self.ror(e , 6) ^ self.ror(e , 11) ^ self.ror(e , 25)
                ch = (e & f) ^ ((~e & 0Xffffffff) & g)
                tempa = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0X100000000
                sb = self.ror(a , 2) ^ self.ror(a , 13) ^ self.ror(a , 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sb + maj) % 0X100000000
                h , g , f , e , d , c , b , a = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0X100000000),
                    c,
                    b,
                    a,
                    ((tempa + tempb) % 0X100000000),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0X100000000)
                for index, element in enumerate(self.hashes)
            ]
        self.hash = ''.join([hex(value)[2:].zfill(8) for value in self.hashes])
    def ror ( self : Union[str, Any] , value : int , rotations : int) -> int:
        """simple docstring"""
        return 0Xffffffff & (value << (32 - rotations)) | (value >> rotations)
class __lowerCAmelCase ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]) -> None:
"""simple docstring"""
import hashlib
_UpperCAmelCase = bytes('Test String' , 'utf-8')
        self.assertEqual(SHAaaa(msg).hash , hashlib.sha256(msg).hexdigest())
def A ( ) -> None:
'''simple docstring'''
import doctest
doctest.testmod()
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
_UpperCAmelCase = f.read()
else:
_UpperCAmelCase = bytes(_UpperCAmelCase , 'utf-8' )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
main()
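# A standalone illustration (not from the source) of the 32-bit right-rotate
# that the `ror` helper above implements: low bits wrap around to the top.
def _ror32(value: int, rotations: int) -> int:
    return 0xFFFFFFFF & ((value << (32 - rotations)) | (value >> rotations))
assert _ror32(0x00000001, 1) == 0x80000000  # the low bit wraps to bit 31
assert _ror32(0b10110, 4) == 0x60000001     # 0110 wraps above the remaining 1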
| 710
|
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
        _UpperCAmelCase = character_freq_dict.get(character , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
    print('\nFor string = ' , in_str , ':' )
    print(
        '> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(in_str ) , '\ttime =' , timeit(
        'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
    print(
        '> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(in_str ) , '\ttime =' , timeit(
        'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
UpperCAmelCase__ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 639
| 0
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = 50 # max width of layer names
UpperCAmelCase__ = 70 # max width of quantizer names
def A ( _UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=_UpperCAmelCase , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=_UpperCAmelCase , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=_UpperCAmelCase , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=_UpperCAmelCase , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=_UpperCAmelCase , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=_UpperCAmelCase , type=_UpperCAmelCase , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=_UpperCAmelCase , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def A ( _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
if args.calibrator == "max":
_UpperCAmelCase = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
_UpperCAmelCase = 'histogram'
elif args.calibrator == "mse":
_UpperCAmelCase = 'histogram'
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
    _UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    _UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict=False , _UpperCAmelCase : int=False ) -> Optional[Any]:
'''simple docstring'''
logger.info('Configuring Model for Quantization' )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ['embeddings'] , which='weight' , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [''] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
        if args.clip_gelu:
            clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def A ( _UpperCAmelCase : Any ) -> Union[str, Any]:
'''simple docstring'''
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
    print_quant_summary(model )
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
    def fusea(_UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ):
        for mod in [qq, qk, qv]:
            if not hasattr(mod , '_amax' ):
                print(' WARNING: NO AMAX BUFFER' )
                return
        _UpperCAmelCase = qq._amax.detach().item()
        _UpperCAmelCase = qk._amax.detach().item()
        _UpperCAmelCase = qv._amax.detach().item()
        _UpperCAmelCase = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
_UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval )
_UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def A ( _UpperCAmelCase : str ) -> Optional[int]:
'''simple docstring'''
for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
            _UpperCAmelCase = mod.weight.shape[0]
            _UpperCAmelCase = mod._weight_quantizer._amax.detach()
            _UpperCAmelCase = torch.ones(k , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def A ( _UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
for name, mod in model.named_modules():
        if hasattr(mod , '_weight_quantizer' ):
            if not hasattr(mod._weight_quantizer , '_amax'):
                print(F"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
_UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
_UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
            _UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=reduce_axis , keepdims=True ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
_UpperCAmelCase = amax
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : str=25 , _UpperCAmelCase : List[str]=180 , _UpperCAmelCase : Tuple=None ) -> str:
'''simple docstring'''
    if ignore is None:
        _UpperCAmelCase = []
    elif not isinstance(ignore , list ):
        _UpperCAmelCase = [ignore]
_UpperCAmelCase = 0
    for name, mod in model.named_modules():
        if not hasattr(mod , 'weight' ):
            continue
        _UpperCAmelCase = max(name_width , len(name ) )
    for name, mod in model.named_modules():
        _UpperCAmelCase = getattr(mod , '_input_quantizer' , None )
        _UpperCAmelCase = getattr(mod , '_weight_quantizer' , None )
        if not hasattr(mod , 'weight' ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        _UpperCAmelCase = F"Act:{input_q.extra_repr()}"
        _UpperCAmelCase = F"Wgt:{weight_q.extra_repr()}"
        _UpperCAmelCase = F"{name:{name_width}} {act_str} {wgt_str}"
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(F"{name:{name_width}} {act_str}" )
            logger.info(F"{' ':{name_width}} {wgt_str}" )
def A ( _UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = 0
for name, mod in model.named_modules():
        if isinstance(mod , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
    _UpperCAmelCase = getattr(mod , quantizer , None )
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod , k )
        setattr(quantizer_mod , k , v )
else:
logger.warning(F"{name} has no {quantizer}" )
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]="both" , **_UpperCAmelCase : Any ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(_UpperCAmelCase , _UpperCAmelCase , '_input_quantizer' , _UpperCAmelCase , _UpperCAmelCase )
if which in ["weight", "both"]:
set_quantizer(_UpperCAmelCase , _UpperCAmelCase , '_weight_quantizer' , _UpperCAmelCase , _UpperCAmelCase )
logger.info(_UpperCAmelCase )
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
    for name, mod in model.named_modules():
        if hasattr(mod , '_input_quantizer' ) or hasattr(mod , '_weight_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    set_quantizers(name , mod , **kwargs )
        elif name.endswith('_quantizer' ):
            for n in names:
                if re.search(n , name ):
                    _UpperCAmelCase = F"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += F" {k}={v}"
                        setattr(mod , k , v )
                    logger.info(s )
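# Hedged usage sketch of the calibration flow above (original function names
# assumed: add_arguments, set_default_quantizers, configure_model,
# enable_calibration, finish_calibration; `model` and the calibration loop
# are placeholders):
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args(["--aprec", "8", "--wprec", "8"])
#   set_default_quantizers(args)
#   configure_model(model, args, calib=True)
#   enable_calibration(model)
#   ...  # run forward passes over calibration data
#   finish_calibration(model, args)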
| 711
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A )} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
if self.train_file is not None:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
    with open(ref_file , 'r' , encoding='utf-8' ) as f:
        _UpperCAmelCase = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs )
_UpperCAmelCase = {c: dataset[c] for c in dataset.column_names}
_UpperCAmelCase = refs
return Dataset.from_dict(_UpperCAmelCase )
def A ( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s' , training_args )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
_UpperCAmelCase = {}
if data_args.train_file is not None:
_UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase = data_args.validation_file
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
_UpperCAmelCase = 'text'
        _UpperCAmelCase = load_dataset(extension , data_files=data_files )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
        _UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **config_kwargs )
elif model_args.model_name_or_path:
        _UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **config_kwargs )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
        _UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **tokenizer_kwargs )
elif model_args.model_name_or_path:
        _UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **tokenizer_kwargs )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
        _UpperCAmelCase = AutoModelForMaskedLM.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets['train'].column_names
else:
_UpperCAmelCase = datasets['validation'].column_names
_UpperCAmelCase = 'text' if 'text' in column_names else column_names[0]
_UpperCAmelCase = 'max_length' if data_args.pad_to_max_length else False
    def tokenize_function(examples ):
        # Remove empty lines
        examples['text'] = [line for line in examples['text'] if len(line ) > 0 and not line.isspace()]
        return tokenizer(examples['text'] , padding=padding , truncation=True , max_length=data_args.max_seq_length )
_UpperCAmelCase = datasets.map(
        tokenize_function , batched=True , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
    _UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
        _UpperCAmelCase = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
            with open(output_train_file , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = math.exp(eval_output['eval_loss'] )
_UpperCAmelCase = perplexity
_UpperCAmelCase = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
return results
def A ( _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
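# Hedged usage sketch: an example launch (script name, model and file paths
# are hypothetical; the flags come from the dataclasses / TrainingArguments
# defined above):
#   python run_mlm_wwm.py \
#       --model_name_or_path bert-base-chinese \
#       --train_file train.txt \
#       --train_ref_file train_ref.txt \
#       --do_train --do_eval \
#       --output_dir ./mlm_wwm_out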
| 639
| 0
|
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple ) -> int:
'''simple docstring'''
while a != 0:
_UpperCAmelCase , _UpperCAmelCase = b % a, a
return b
def A ( _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
    if gcd(a , m ) != 1:
        _UpperCAmelCase = F"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg )
    ua , ub , uc = 1, 0, a
    va , vb , vc = 0, 1, m
    while vc != 0:
        q = uc // vc
        va , vb , vc , ua , ub , uc = (ua - q * va), (ub - q * vb), (uc - q * vc), va, vb, vc
    return ua % m
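# A standalone cross-check (not from the source): Python 3.8+ computes
# modular inverses natively, which should agree with the extended-Euclid
# routine above for coprime inputs.
assert pow(3, -1, 11) == 4  # 3 * 4 = 12 ≡ 1 (mod 11)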
| 712
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCAmelCase__ = "▁"
# Segments (not really needed)
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
class __lowerCAmelCase ( A ):
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = '''left'''
UpperCamelCase = XLNetTokenizer
def __init__( self : Any , A : Union[str, Any]=None , A : str=None , A : Tuple=False , A : Tuple=True , A : Any=False , A : List[str]="<s>" , A : List[str]="</s>" , A : Optional[int]="<unk>" , A : Tuple="<sep>" , A : str="<pad>" , A : Dict="<cls>" , A : Dict="<mask>" , A : Optional[Any]=["<eop>", "<eod>"] , **A : Optional[Any] , ) -> str:
"""simple docstring"""
        _UpperCAmelCase = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCamelCase ( self : Tuple , A : List[int] , A : Optional[List[int]] = None) -> List[int]:
"""simple docstring"""
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def _lowerCamelCase ( self : List[str] , A : str , A : Optional[str] = None) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        _UpperCAmelCase = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
return (out_vocab_file,)
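# Hedged illustration of XLNet's special-token layout built by
# `build_inputs_with_special_tokens` above: unlike BERT, the classifier
# token sits at the END of the (left-padded) sequence:
#   single sequence: tokens_a <sep> <cls>
#   sequence pair:   tokens_a <sep> tokens_b <sep> <cls>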
| 639
| 0
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class __lowerCAmelCase :
UpperCamelCase = BlenderbotConfig
UpperCamelCase = {}
UpperCamelCase = '''gelu'''
def __init__( self : List[str] , A : Tuple , A : Dict=13 , A : Union[str, Any]=7 , A : Optional[int]=True , A : int=False , A : str=99 , A : Optional[int]=32 , A : Optional[Any]=2 , A : Dict=4 , A : Union[str, Any]=37 , A : Tuple=0.1 , A : Optional[Any]=0.1 , A : List[str]=20 , A : Any=2 , A : Union[str, Any]=1 , A : List[str]=0 , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def _lowerCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
_UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
_UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1)
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        _UpperCAmelCase = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids)
return config, inputs_dict
def _lowerCamelCase ( self : Dict , A : Union[str, Any] , A : List[str]) -> Any:
"""simple docstring"""
        _UpperCAmelCase = TFBlenderbotModel(config=config).get_decoder()
_UpperCAmelCase = inputs_dict['''input_ids''']
_UpperCAmelCase = input_ids[:1, :]
_UpperCAmelCase = inputs_dict['''attention_mask'''][:1, :]
_UpperCAmelCase = inputs_dict['''head_mask''']
_UpperCAmelCase = 1
# first forward pass
        _UpperCAmelCase = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True)
_UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size)
_UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta)
# append to next input_ids and
_UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1)
_UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1)
        _UpperCAmelCase = model(next_input_ids , attention_mask=next_attention_mask)[0]
        _UpperCAmelCase = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
# select random slice
_UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1]))
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
_UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice , output_from_no_past_slice , rtol=1E-3)
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Any=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Any=None , ) -> int:
'''simple docstring'''
if attention_mask is None:
_UpperCAmelCase = tf.cast(tf.math.not_equal(a_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_UpperCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
UpperCamelCase = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
UpperCamelCase = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase = (
{
'''conversational''': TFBlenderbotForConditionalGeneration,
'''feature-extraction''': TFBlenderbotModel,
'''summarization''': TFBlenderbotForConditionalGeneration,
'''text2text-generation''': TFBlenderbotForConditionalGeneration,
'''translation''': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = TFBlenderbotModelTester(self)
        _UpperCAmelCase = ConfigTester(self , config_class=BlenderbotConfig)
def _lowerCamelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Dict) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
UpperCamelCase = ['''My friends are cool but they eat too many carbs.''']
UpperCamelCase = '''facebook/blenderbot-400M-distill'''
@cached_property
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
return BlenderbotTokenizer.from_pretrained(self.model_name)
@cached_property
def _lowerCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
@slow
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.tokenizer(self.src_text , return_tensors='tf')
_UpperCAmelCase = self.model.generate(
model_inputs.input_ids , )
        _UpperCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True)[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 713
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
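# Hedged illustration of the lazy-import pattern above: _LazyModule defers
# heavy submodule imports until attribute access, so importing the package
# stays cheap (attribute name below is indicative):
#   import transformers
#   cls = transformers.YolosForObjectDetection  # modeling_yolos is imported here, not before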
| 639
| 0
|
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __lowerCAmelCase :
@staticmethod
def _lowerCamelCase ( *A : int , **A : Union[str, Any]) -> Optional[Any]:
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
UpperCamelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def _lowerCamelCase ( self : List[str] , A : Optional[Any] , A : Union[str, Any] , A : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection')
_UpperCAmelCase = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def _lowerCamelCase ( self : Dict , A : Optional[int] , A : str) -> int:
"""simple docstring"""
_UpperCAmelCase = object_detector(examples[0] , threshold=0.0)
        _UpperCAmelCase = len(outputs)
        self.assertGreater(n , 0)
        self.assertEqual(
            outputs , [
                {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                }
                for i in range(n)
            ] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF')
def _lowerCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
pass
@require_torch
def _lowerCamelCase ( self : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection')
_UpperCAmelCase = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.6_4 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
{'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
] , )
_UpperCAmelCase = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.6_4 , )
self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
[
{'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}},
{'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}},
{'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
{'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}},
{'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}},
]
] , )
@require_torch
@slow
    def test_large_model_pt ( self : Any) -> List[Any]:
"""simple docstring"""
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
] , )
        outputs = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
[
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
[
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
{'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}},
{'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF')
    def test_large_model_tf ( self : List[Any]) -> List[str]:
"""simple docstring"""
pass
@require_torch
@slow
    def test_threshold ( self : int) -> str:
"""simple docstring"""
        threshold = 0.2
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=threshold , )
self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
{'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}},
] , )
@require_torch
@slow
    def test_top_k ( self : Union[str, Any]) -> int:
"""simple docstring"""
        top_k = 2
        object_detector = pipeline('zero-shot-object-detection')
        outputs = object_detector(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=top_k , )
self.assertEqual(
            nested_simplify(outputs , decimals=4) , [
{'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}},
{'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}},
] , )
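# --- Hedged usage sketch (not part of the test suite above) ---
# Minimal example of the zero-shot-object-detection pipeline these tests
# exercise; the threshold value below is an illustrative assumption.
if __name__ == "__main__":
    _detector = pipeline('zero-shot-object-detection')
    _predictions = _detector(
        'http://images.cocodataset.org/val2017/000000039769.jpg' ,
        candidate_labels=['cat', 'remote', 'couch'] ,
        threshold=0.2 , )
    for _pred in _predictions:
        # each prediction is {'score': float, 'label': str, 'box': {'xmin', 'ymin', 'xmax', 'ymax'}}
        print(_pred['label'] , round(_pred['score'] , 4) , _pred['box'])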
| 714
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")
def get_hash ( example : Tuple ) -> str:
    '''simple docstring'''
    return {"hash": hashlib.md5(re.sub(PATTERN , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def line_stats ( example : Optional[Any] ) -> Optional[int]:
    '''simple docstring'''
    line_lengths = [len(line ) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats ( example : Any ) -> Optional[Any]:
    '''simple docstring'''
    alpha_frac = np.mean([c.isalnum() for c in example['content']] )
    return {"alpha_frac": alpha_frac}
def check_uniques ( example : Any , uniques : List[Any] ) -> Dict:
    '''simple docstring'''
    if example["hash"] in uniques:
        uniques.remove(example['hash'] )
        return True
    else:
        return False
def is_autogenerated ( example : List[Any] , scan_width : Dict=5 ) -> Optional[Any]:
    '''simple docstring'''
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test ( example : Any , scan_width : Tuple=5 , coeff : Optional[int]=0.05 ) -> Tuple:
    '''simple docstring'''
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n' )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count('config' )
        count_test += line.lower().count('test' )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords ( example : Any ) -> Tuple:
    '''simple docstring'''
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments ( example : Tuple , minimum : Optional[Any]=4 ) -> Dict:
    '''simple docstring'''
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=' )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio ( example : Optional[Any] ) -> str:
    '''simple docstring'''
    input_ids = tokenizer(example['content'] , truncation=False )['input_ids']
    ratio = len(example['content'] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess ( example : Dict ) -> Optional[Any]:
    '''simple docstring'''
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter ( example : int , uniques : Tuple , args : List[Any] ) -> Any:
    '''simple docstring'''
    if not check_uniques(example , uniques ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file ( file_path : Optional[Any] ) -> Any:
    '''simple docstring'''
    with open(file_path , 'rb' ) as f_in:
        with gzip.open(str(file_path ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
    print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"""file-{file_number+1:012}.json""")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
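# --- Hedged sketch (illustrative, not part of the pipeline above) ---
# Feeding one synthetic example through the tokenizer-free heuristics defined
# above; the sample content is made up and the helper name is hypothetical.
def _demo_heuristics():
    example = {"content": "def add(a, b):\n    return a + b\n"}
    stats = {}
    stats.update(get_hash(example))
    stats.update(line_stats(example))
    stats.update(alpha_stats(example))
    stats.update(is_autogenerated(example))
    stats.update(is_config_or_test(example))
    stats.update(has_no_keywords(example))
    stats.update(has_few_assignments(example))
    print(stats)  # e.g. {'hash': '...', 'line_mean': 16.0, 'autogenerated': False, ...}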
| 639
| 0
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
    @parameterized.expand([(None,), ('foo.json',)])
    def test_save_load_config ( self : Any , config_name : Union[str, Any]) -> List[str]:
        """simple docstring"""
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir , config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample , True)
        self.assertEqual(loaded_config.temperature , 0.7)
        self.assertEqual(loaded_config.length_penalty , 1.0)
        self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k , 50)
        self.assertEqual(loaded_config.max_length , 20)
        self.assertEqual(loaded_config.max_time , None)
    def test_from_model_config ( self : List[Any]) -> int:
        """simple docstring"""
        model_config = AutoConfig.from_pretrained('gpt2')
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model , default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id)
    def test_update ( self : Optional[Any]) -> Any:
        """simple docstring"""
        generation_config = GenerationConfig()
        update_kwargs = {
            'max_new_tokens': 10_24,
            'foo': 'bar',
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs , update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens , 10_24)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs , {'foo': 'bar'})
    def test_initialize_new_kwargs ( self : Optional[Any]) -> Dict:
        """simple docstring"""
        generation_config = GenerationConfig()
        generation_config.foo = 'bar'
        with tempfile.TemporaryDirectory('test-generation-config') as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
            # update_kwargs was used to update the config on valid attributes
            self.assertEqual(new_config.foo , 'bar')
            generation_config = GenerationConfig.from_model_config(new_config)
            assert not hasattr(generation_config , 'foo')  # no new kwargs should be initialized if from config
    def test_kwarg_init ( self : str) -> Union[str, Any]:
        """simple docstring"""
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature , 1.0)
        self.assertEqual(default_config.do_sample , False)
        self.assertEqual(default_config.num_beams , 1)
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        self.assertEqual(config.temperature , 0.7)
        self.assertEqual(config.do_sample , True)
        self.assertEqual(config.num_beams , 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir , temperature=1.0)
        self.assertEqual(loaded_config.temperature , 1.0)
        self.assertEqual(loaded_config.do_sample , True)
        self.assertEqual(loaded_config.num_beams , 1)  # default value
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
@classmethod
    def setUpClass ( cls : Union[str, Any]) -> Any:
        """simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass ( cls : Dict) -> List[Any]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-generation-config')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org')
except HTTPError:
pass
    def test_push_to_hub ( self : List[Any]) -> int:
        """simple docstring"""
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub('test-generation-config' , use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
        # Reset repo
        delete_repo(token=self._token , repo_id='test-generation-config')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='test-generation-config' , push_to_hub=True , use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(F"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
    def test_push_to_hub_in_organization ( self : Tuple) -> List[Any]:
        """simple docstring"""
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained('valid_org/test-generation-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id='valid_org/test-generation-config-org' , push_to_hub=True , use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained('valid_org/test-generation-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k))
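# --- Hedged usage sketch (outside the test classes above) ---
# The basic save/load round trip these tests exercise, runnable on its own.
if __name__ == "__main__":
    _config = GenerationConfig(do_sample=True , temperature=0.7)
    with tempfile.TemporaryDirectory() as _tmp_dir:
        _config.save_pretrained(_tmp_dir)
        _reloaded = GenerationConfig.from_pretrained(_tmp_dir)
    assert _reloaded.temperature == 0.7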
| 715
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key ( key : List[str] ) -> Tuple:
    '''simple docstring'''
    if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.1.bias' , '.conv1d_1.bias' )
    elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.1.weight' , '.conv1d_1.weight' )
    elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.3.bias' , '.conv1d_2.bias' )
    elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.3.weight' , '.conv1d_2.weight' )
    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
    if "prime_prior" in key:
        key = key.replace('prime_prior' , 'encoder' )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.' , '.' )
    if key.endswith('k' ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('.k' , '.codebook' )
    if "y_emb." in key:
        return key.replace('y_emb.' , 'metadata_embedding.' )
    if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb' , 'embed_tokens' )
    if "prime_state_ln" in key:
        return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
    if ".ln" in key:
        return key.replace('.ln' , '.layer_norm' )
    if "_ln" in key:
        return key.replace('_ln' , '_layer_norm' )
    if "prime_state_proj" in key:
        return key.replace('prime_state_proj' , 'encoder.proj_in' )
    if "prime_x_out" in key:
        return key.replace('prime_x_out' , 'encoder.lm_head' )
    if "prior.x_out" in key:
        return key.replace('x_out' , 'fc_proj_out' )
    if "x_emb" in key:
        return key.replace('x_emb' , 'embed_tokens' )
return key
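# Hedged examples of replace_key on made-up checkpoint keys (the inputs below
# are illustrative, not taken from a real Jukebox state dict):
#   replace_key('prime_state_ln')     -> 'encoder.final_layer_norm'
#   replace_key('y_emb.weight')       -> 'metadata_embedding.weight'
#   replace_key('prior.x_out.weight') -> 'prior.fc_proj_out.weight'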
def fix_jukebox_keys ( state_dict : str , model_state_dict : str , key_prefix : Tuple , mapping : List[Any] ) -> Tuple:
    '''simple docstring'''
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
    re_encoder_block_resnet = re.compile(
        R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_encoder_block_proj_out = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
    re_decoder_block_conv_out = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
    re_decoder_block_resnet = re.compile(
        R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_decoder_block_proj_in = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
    re_prior_cond_conv_out = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
    re_prior_cond_resnet = re.compile(
        R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_prior_cond_proj_in = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if F"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(F"failed converting {original_key} to {key}, does not match" )
        # handle missmatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            val = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
return new_dict
@torch.no_grad()
def convert_openai_checkpoint ( model_name : List[str]=None , pytorch_dump_folder_path : Dict=None ) -> Dict:
    '''simple docstring'''
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
            r = requests.get(F"{PREFIX}{file}" , allow_redirects=True )
            os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=True )
            open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split('/' )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('.b' ):
                new_dic[k.replace('b' , 'bias' )] = old_dic[k]
            elif k.endswith('.w' ):
                new_dic[k.replace('w' , 'weight' )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('.blocks.' , '.model.' )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = 'vqvae' if i == 0 else F"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
        json.dump(mapping , txtfile )
    print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
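# Hedged usage sketch (assuming this script is saved as convert_jukebox.py; the
# paths below are illustrative):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted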
| 639
| 0
|
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm ( main_process_only : bool = True , *args : Dict , **kwargs : Any ) -> int:
    '''simple docstring'''
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
    disable = False
    if main_process_only:
        # only the local main process should render the progress bar
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
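# Hedged usage sketch: under `accelerate launch` with multiple processes, only
# local rank 0 draws the bar; `range(10)` stands in for a real dataloader.
#   for _ in tqdm(True, range(10), desc="steps"):
#       pass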
| 716
|
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*A : Union[str, Any] , **B : List[Any]) -> Union[str, Any]:
            """simple docstring"""
            pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@require_torch
    def test_small_model_pt ( self : List[str]) -> Tuple:
"""simple docstring"""
        image_classifier = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image , candidate_labels=['a', 'b', 'c'])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output) , [
                [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
                [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
            ] , )
        output = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
        self.assertEqual(
            nested_simplify(output) , [
                [
                    {'score': 0.3_3_3, 'label': ANY(str)},
                    {'score': 0.3_3_3, 'label': ANY(str)},
                    {'score': 0.3_3_3, 'label': ANY(str)},
                ],
            ]
            * 5 , )
@require_tf
    def test_small_model_tf ( self : str) -> Tuple:
"""simple docstring"""
        image_classifier = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image , candidate_labels=['a', 'b', 'c'])
        self.assertEqual(
            nested_simplify(output) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
        output = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
        self.assertEqual(
            nested_simplify(output) , [
                [
                    {'score': 0.3_3_3, 'label': ANY(str)},
                    {'score': 0.3_3_3, 'label': ANY(str)},
                    {'score': 0.3_3_3, 'label': ANY(str)},
                ],
            ]
            * 5 , )
@slow
@require_torch
    def test_large_model_pt ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
        image_classifier = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
            nested_simplify(output) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
        output = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
            nested_simplify(output) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
    def test_large_model_tf ( self : List[str]) -> Any:
"""simple docstring"""
        image_classifier = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        output = image_classifier(image , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
            nested_simplify(output) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
        output = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
            nested_simplify(output) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
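# --- Hedged usage sketch (not part of the test suite above) ---
# Minimal zero-shot image classification call; the image path is illustrative.
if __name__ == "__main__":
    _classifier = pipeline(task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32')
    _image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    print(_classifier(_image , candidate_labels=['cat', 'plane', 'remote']))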
| 639
| 0
|
from manim import *
class Stage1 ( Scene ):
    def construct ( self : List[Any]) -> Union[str, Any]:
        """simple docstring"""
        mem = Rectangle(height=0.5 , width=0.5)
        fill = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP , buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP , buff=0)
        cpu_rects = VGroup(cpu_left_col , cpu_right_col).arrange(RIGHT , buff=0)
        cpu_text = Text('CPU' , font_size=24)
        cpu = Group(cpu_rects , cpu_text).arrange(DOWN , buff=0.5 , aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP , buff=0)
        gpu_text = Text('GPU' , font_size=24)
        gpu = Group(gpu_rect , gpu_text).arrange(DOWN , buff=0.5 , aligned_edge=DOWN)
        gpu.align_to(cpu , DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT , buff=0)
        model_text = Text('Model' , font_size=24)
        model = Group(model_rect , model_text).arrange(DOWN , buff=0.5 , aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col , run_time=1) , Create(cpu_right_col , run_time=1) , Create(gpu_rect , run_time=1) , )
        step_a = MarkupText(
            F"First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM." , font_size=24 , )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            F"<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0])
        step_a.move_to([2, 2, 0])
        self.play(Write(step_a , run_time=2.5) , Write(key_text) , Write(key))
        self.add(model)
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.4_6 , width=0.4_6).set_stroke(width=0.0).set_fill(YELLOW , opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.4_6 / 4
            cpu_target.target.width = 0.4_6 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT) , buff=0.0_2 , direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target , direction=UP , buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=RIGHT , buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target , run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
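# Hedged usage note (assuming this file is saved as stage_1.py): manim scenes
# are rendered from the command line, e.g.
#   manim -pql stage_1.py Stage1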
| 717
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
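# Hedged usage note: with the lazy module installed in sys.modules, submodule
# symbols resolve on first attribute access, e.g.
#   from transformers.models.ctrl import CTRLConfig, CTRLTokenizer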
| 639
| 0
|
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
    max_seq_length: Optional[int] = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    pad_to_max_length: bool = field(
        default=True , metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        } , )
@dataclass
class ModelArguments :
    model_name_or_path: str = field(
        default=None , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    language: str = field(
        default=None , metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."} )
    train_language: Optional[str] = field(
        default=None , metadata={"help": "Train language if it is different from the evaluation language."} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    do_lower_case: Optional[bool] = field(
        default=False , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision: str = field(
        default='main' , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
    ignore_mismatched_sizes: bool = field(
        default=False , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , )
def main ( ) -> Optional[Any]:
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_xnli' , _UpperCAmelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                'xnli' , model_args.language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        else:
            train_dataset = load_dataset(
                'xnli' , model_args.train_language , split='train' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = train_dataset.features['label'].names
    if training_args.do_eval:
        eval_dataset = load_dataset(
            'xnli' , model_args.language , split='validation' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = eval_dataset.features['label'].names
    if training_args.do_predict:
        predict_dataset = load_dataset(
            'xnli' , model_args.language , split='test' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
        label_list = predict_dataset.features['label'].names
    # Labels
    num_labels = len(label_list )
    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , id2label={str(i ): label for i, label in enumerate(label_list )} , label2id={label: i for i, label in enumerate(label_list )} , finetuning_task='xnli' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
    def preprocess_function(examples : Optional[int] ):
        # Tokenize the texts
        return tokenizer(
            examples['premise'] , examples['hypothesis'] , padding=padding , max_length=data_args.max_seq_length , truncation=True , )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='train dataset map pre-processing' ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on train dataset' , )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='validation dataset map pre-processing' ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on validation dataset' , )
    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset ) , data_args.max_predict_samples )
            predict_dataset = predict_dataset.select(range(max_predict_samples ) )
        with training_args.main_process_first(desc='prediction dataset map pre-processing' ):
            predict_dataset = predict_dataset.map(
                preprocess_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc='Running tokenizer on prediction dataset' , )
    # Get the metric function
    metric = evaluate.load('xnli' )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p : EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return metric.compute(predictions=preds , references=p.label_ids )
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    # Prediction
    if training_args.do_predict:
        logger.info('*** Predict ***' )
        predictions, labels, metrics = trainer.predict(predict_dataset , metric_key_prefix='predict' )
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset )
        )
        metrics['predict_samples'] = min(max_predict_samples , len(predict_dataset ) )
        trainer.log_metrics('predict' , metrics )
        trainer.save_metrics('predict' , metrics )
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , 'predictions.txt' )
        if trainer.is_world_process_zero():
            with open(output_predict_file , 'w' ) as writer:
                writer.write('index\tprediction\n' )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(F"{index}\t{item}\n" )
if __name__ == "__main__":
main()
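# Hedged usage sketch (illustrative arguments, assuming this script is saved
# as run_xnli.py): a typical XNLI fine-tuning run would look like
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --per_device_train_batch_size 32 \
#       --output_dir /tmp/debug_xnli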
| 718
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters ( model : List[Any] ) -> int:
    '''simple docstring'''
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback ( output_dir : List[Any] , metric : Union[str, Any] ) -> Any:
    '''simple docstring'''
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback ( metric : Any , patience : int ) -> str:
    '''simple docstring'''
    return EarlyStopping(
        monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback ( pl.Callback ):
    def on_batch_end ( self : Optional[int] , trainer : List[Any] , pl_module : int) -> Dict:
        """simple docstring"""
        lrs = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs ( self : Optional[Any] , trainer : pl.Trainer , pl_module : pl.LightningModule , type_path : str , save_generations : int=True) -> None:
        """simple docstring"""
        logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file , 'a+') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor):
                    val = val.item()
                msg = F"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(content)
@rank_zero_only
    def on_train_start ( self : str , trainer : Optional[int] , pl_module : List[str]) -> Optional[Any]:
        """simple docstring"""
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
    def on_test_end ( self : Dict , trainer : pl.Trainer , pl_module : pl.LightningModule) -> int:
        """simple docstring"""
        save_json(pl_module.metrics , pl_module.metrics_save_path)
        return self._write_logs(trainer , pl_module , 'test')
@rank_zero_only
    def on_validation_end ( self : Tuple , trainer : pl.Trainer , pl_module : str) -> Dict:
"""simple docstring"""
save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
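# --- Hedged sketch (illustrative wiring, not part of the module above) ---
# How the factories and the logging callback defined above are typically
# attached to a PyTorch Lightning trainer; output dir, metric, and patience
# are made-up values and `_build_trainer` is a hypothetical helper.
def _build_trainer():
    callbacks = [
        Seq2SeqLoggingCallback() ,
        get_checkpoint_callback('checkpoints' , 'rouge2') ,
        get_early_stopping_callback('rouge2' , patience=3) ,
    ]
    return pl.Trainer(callbacks=callbacks)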
| 639
| 0
|
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee ( BertEncoder ):
    def adaptive_forward ( self : Dict , hidden_states : Optional[Any] , current_layer : List[str] , attention_mask : Optional[Any]=None , head_mask : Tuple=None) -> Tuple:
        """simple docstring"""
        layer_outputs = self.layer[current_layer](hidden_states , attention_mask , head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    '''The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.''' , BERT_START_DOCSTRING , )
class __lowerCAmelCase ( UpperCamelCase_ ):
def __init__( self : str , A : Optional[int]) -> Any:
"""simple docstring"""
super().__init__(A)
_UpperCAmelCase = BertEncoderWithPabee(A)
self.init_weights()
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 0
def _lowerCamelCase ( self : Union[str, Any] , A : List[Any]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = threshold
def _lowerCamelCase ( self : Optional[Any] , A : List[str]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = patience
def _lowerCamelCase ( self : Any) -> Any:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 0
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = self.inference_layers_num / self.inference_instances_num
_UpperCAmelCase = (
F"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
F" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
)
print(A)
@add_start_docstrings_to_model_forward(A)
def _lowerCamelCase ( self : List[Any] , A : List[str]=None , A : Optional[Any]=None , A : Optional[int]=None , A : List[Any]=None , A : List[Any]=None , A : str=None , A : Any=None , A : Union[str, Any]=None , A : Optional[Any]=None , A : int=None , A : Optional[Any]=False , ) -> Union[str, Any]:
"""simple docstring"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
elif input_ids is not None:
_UpperCAmelCase = input_ids.size()
elif inputs_embeds is not None:
_UpperCAmelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds')
_UpperCAmelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_UpperCAmelCase = torch.ones(A , device=A)
if token_type_ids is None:
_UpperCAmelCase = torch.zeros(A , dtype=torch.long , device=A)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_UpperCAmelCase = self.get_extended_attention_mask(A , A , A)
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
_UpperCAmelCase = encoder_hidden_states.size()
_UpperCAmelCase = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
_UpperCAmelCase = torch.ones(A , device=A)
_UpperCAmelCase = self.invert_attention_mask(A)
else:
_UpperCAmelCase = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_UpperCAmelCase = self.get_head_mask(A , self.config.num_hidden_layers)
_UpperCAmelCase = self.embeddings(
input_ids=A , position_ids=A , token_type_ids=A , inputs_embeds=A)
_UpperCAmelCase = embedding_output
if self.training:
_UpperCAmelCase = []
for i in range(self.config.num_hidden_layers):
_UpperCAmelCase = self.encoder.adaptive_forward(
A , current_layer=A , attention_mask=A , head_mask=A)
_UpperCAmelCase = self.pooler(A)
_UpperCAmelCase = output_layers[i](output_dropout(A))
res.append(A)
elif self.patience == 0: # Use all layers for inference
_UpperCAmelCase = self.encoder(
A , attention_mask=A , head_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
_UpperCAmelCase = self.pooler(encoder_outputs[0])
_UpperCAmelCase = [output_layers[self.config.num_hidden_layers - 1](A)]
else:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
for i in range(self.config.num_hidden_layers):
calculated_layer_num += 1
_UpperCAmelCase = self.encoder.adaptive_forward(
A , current_layer=A , attention_mask=A , head_mask=A)
_UpperCAmelCase = self.pooler(A)
_UpperCAmelCase = output_layers[i](A)
if regression:
_UpperCAmelCase = logits.detach()
if patient_result is not None:
_UpperCAmelCase = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels) < self.regression_threshold:
patient_counter += 1
else:
_UpperCAmelCase = 0
else:
_UpperCAmelCase = logits.detach().argmax(dim=1)
if patient_result is not None:
_UpperCAmelCase = patient_result.detach().argmax(dim=1)
if (patient_result is not None) and torch.all(labels.eq(A)):
patient_counter += 1
else:
_UpperCAmelCase = 0
_UpperCAmelCase = logits
if patient_counter == self.patience:
break
_UpperCAmelCase = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
'''Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ''' , UpperCamelCase_ , )
class __lowerCAmelCase ( UpperCamelCase_ ):
def __init__( self : List[Any] , A : Any) -> List[str]:
"""simple docstring"""
super().__init__(A)
_UpperCAmelCase = config.num_labels
_UpperCAmelCase = BertModelWithPabee(A)
_UpperCAmelCase = nn.Dropout(config.hidden_dropout_prob)
_UpperCAmelCase = nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels) for _ in range(config.num_hidden_layers)])
self.init_weights()
@add_start_docstrings_to_model_forward(A)
def _lowerCamelCase ( self : Any , A : str=None , A : Optional[Any]=None , A : Union[str, Any]=None , A : List[str]=None , A : List[str]=None , A : Any=None , A : Optional[Any]=None , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = self.bert(
input_ids=A , attention_mask=A , token_type_ids=A , position_ids=A , head_mask=A , inputs_embeds=A , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
_UpperCAmelCase = (logits[-1],)
if labels is not None:
_UpperCAmelCase = None
_UpperCAmelCase = 0
for ix, logits_item in enumerate(A):
if self.num_labels == 1:
# We are doing regression
_UpperCAmelCase = MSELoss()
_UpperCAmelCase = loss_fct(logits_item.view(-1) , labels.view(-1))
else:
_UpperCAmelCase = CrossEntropyLoss()
_UpperCAmelCase = loss_fct(logits_item.view(-1 , self.num_labels) , labels.view(-1))
if total_loss is None:
_UpperCAmelCase = loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
_UpperCAmelCase = (total_loss / total_weights,) + outputs
return outputs
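

# --- Hedged sketch (added, not from the original file): the patience-based
# early-exit rule used in the forward pass above, isolated as a standalone
# function over per-layer logits. Only torch is required; all names here are
# assumed for illustration.
def _pabee_exit_layer(layerwise_logits, patience):
    """Return the 1-based layer index at which the argmax prediction has stayed
    unchanged for `patience` consecutive layers (last layer if it never does)."""
    patient_counter, previous = 0, None
    for i, layer_logits in enumerate(layerwise_logits, start=1):
        prediction = layer_logits.argmax(dim=-1)
        if previous is not None and bool((prediction == previous).all()):
            patient_counter += 1
        else:
            patient_counter = 0
        previous = prediction
        if patient_counter == patience:
            return i
    return len(layerwise_logits)


if __name__ == "__main__":
    # Predictions flip after layer 1 and are stable from layer 2 on, so with
    # patience=3 the model can stop computing at layer 5 instead of layer 8.
    demo = [torch.tensor([[0.1, 0.9]])] + [torch.tensor([[0.9, 0.1]])] * 7
    assert _pabee_exit_layer(demo, patience=3) == 5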
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = MgpstrTokenizer
UpperCamelCase = False
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> List[Any]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
def _lowerCamelCase ( self : Dict , **A : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'tester'
_UpperCAmelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers(do_lower_case=A)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
_UpperCAmelCase = tokenizer.encode([special_token] , add_special_tokens=A)
self.assertEqual(len(A) , 1)
_UpperCAmelCase = tokenizer.decode(A , skip_special_tokens=A)
self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
_UpperCAmelCase , _UpperCAmelCase = self.get_input_output_texts(A)
_UpperCAmelCase = tokenizer.tokenize(A)
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(A)
_UpperCAmelCase = tokenizer.encode(A , add_special_tokens=A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokenizer.convert_ids_to_tokens(A)
self.assertNotEqual(len(A) , 0)
_UpperCAmelCase = tokenizer.decode(A)
self.assertIsInstance(A , A)
self.assertEqual(text_a.replace(' ' , '') , A)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
UpperCAmelCase__ = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
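

# --- Hedged sketch (added): setup-style files typically consume a version
# table like the one above through a small helper; `deps_list` is an assumed
# name for illustration, not something defined in this file.
def deps_list(*pkgs):
    """Map package names to their pinned requirement strings."""
    return [UpperCAmelCase__[pkg] for pkg in pkgs]


if __name__ == "__main__":
    # e.g. a minimal install_requires could be assembled as:
    assert deps_list("torch", "numpy") == ["torch>=1.4", "numpy"]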
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = {"facebook/bart-base": BartForConditionalGeneration}
UpperCAmelCase__ = {"facebook/bart-base": BartTokenizer}
def A ( ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
_UpperCAmelCase = parser.parse_args()
return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
'''simple docstring'''
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_UpperCAmelCase ) )
with torch.no_grad():
_UpperCAmelCase = 'My friends are cool but they eat too many carbs.'
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
_UpperCAmelCase = model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=_UpperCAmelCase , max_length=_UpperCAmelCase , early_stopping=_UpperCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_UpperCAmelCase , (
inputs['input_ids'],
inputs['attention_mask'],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _UpperCAmelCase , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'seq'},
'output_ids': {0: 'batch', 1: 'seq_out'},
} , example_outputs=_UpperCAmelCase , )
logger.info('Model exported to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_UpperCAmelCase ) )
logger.info('Deduplicated and optimized model written to {}'.format(_UpperCAmelCase ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_UpperCAmelCase )
_UpperCAmelCase = ort_sess.run(
_UpperCAmelCase , {
'input_ids': inputs['input_ids'].cpu().numpy(),
'attention_mask': inputs['attention_mask'].cpu().numpy(),
'num_beams': np.array(_UpperCAmelCase ),
'max_length': np.array(_UpperCAmelCase ),
'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
logger.info('Model outputs from torch and ONNX Runtime are similar.' )
logger.info('Success.' )
def A ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase , _UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _UpperCAmelCase )
if model.config.decoder_start_token_id is None:
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
model.to(_UpperCAmelCase )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = 'BART.onnx'
logger.info('Exporting model to ONNX' )
export_and_validate_model(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
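

# --- Hedged usage sketch (added, not part of the original script): once the
# script has produced `BART.onnx`, the graph can be exercised without PyTorch.
# The file name mirrors the default above; the num_beams/max_length values and
# the BART decoder_start_token_id (2) are assumptions for illustration, so the
# function is defined but deliberately not called here.
def _example_onnx_inference(onnx_path="BART.onnx"):
    sess = onnxruntime.InferenceSession(onnx_path)
    tok = BartTokenizer.from_pretrained("facebook/bart-base")
    enc = tok(["My friends are cool but they eat too many carbs."], return_tensors="np")
    out = sess.run(
        None,
        {
            "input_ids": enc["input_ids"],
            "attention_mask": enc["attention_mask"],
            "num_beams": np.array(4),
            "max_length": np.array(5),
            "decoder_start_token_id": np.array(2),  # assumed BART default
        },
    )
    return tok.batch_decode(out[0], skip_special_tokens=True)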
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase__ = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class __lowerCAmelCase ( A ):
UpperCamelCase = ["""pixel_values"""]
def __init__( self : Dict , A : bool = True , A : Dict[str, int] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Optional[Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_24}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = resample
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : Optional[Any] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BILINEAR , A : Optional[Union[str, ChannelDimension]] = None , **A : Tuple , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" in size:
_UpperCAmelCase = get_resize_output_image_size(A , size['shortest_edge'] , default_to_square=A)
elif "height" in size and "width" in size:
_UpperCAmelCase = (size['height'], size['width'])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Tuple , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Any , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Dict , A : np.ndarray , A : Union[int, float] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Optional[int] , A : ImageInput , A : bool = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : bool = None , A : float = None , A : bool = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = to_numpy_array(A)
if do_resize:
_UpperCAmelCase = self.resize(image=A , size=A , resample=A)
if do_center_crop:
_UpperCAmelCase = self.center_crop(A , size=A)
if do_rescale:
_UpperCAmelCase = self.rescale(image=A , scale=A)
if do_normalize:
_UpperCAmelCase = self.normalize(image=A , mean=A , std=A)
_UpperCAmelCase = to_channel_dimension_format(A , A)
return image
def _lowerCamelCase ( self : Dict , A : ImageInput , A : bool = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : bool = None , A : float = None , A : bool = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : ChannelDimension = ChannelDimension.FIRST , **A : str , ) -> PIL.Image.Image:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
_UpperCAmelCase = make_batched(A)
_UpperCAmelCase = [
[
self._preprocess_image(
image=A , do_resize=A , size=A , resample=A , do_center_crop=A , crop_size=A , do_rescale=A , rescale_factor=A , do_normalize=A , image_mean=A , image_std=A , data_format=A , )
for img in video
]
for video in videos
]
_UpperCAmelCase = {'pixel_values': videos}
return BatchFeature(data=A , tensor_type=A)
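

# --- Hedged sketch (added, not from the original file): the numeric effect of
# the rescale + normalize steps above on a dummy frame, using plain numpy. The
# 0.5 mean/std values are the IMAGENET_STANDARD defaults this processor falls
# back to.
def _rescale_and_normalize(frame, mean=0.5, std=0.5, scale=1 / 255):
    frame = frame.astype("float32") * scale  # rescale: uint8 [0, 255] -> [0, 1]
    return (frame - mean) / std  # normalize: -> roughly [-1, 1]


if __name__ == "__main__":
    frame = np.full((224, 224, 3), 255, dtype=np.uint8)
    assert np.allclose(_rescale_and_normalize(frame), 1.0)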
def gnome_sort(lst: list) -> list:
    """Sort a list in place using gnome sort and return it."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class __lowerCAmelCase ( A ):
def __init__( self : List[Any] , A : Optional[Any] , A : int) -> Optional[Any]:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCAmelCase = DDIMScheduler.from_config(scheduler.config)
self.register_modules(unet=A , scheduler=A)
def _lowerCamelCase ( self : Tuple , A : Any) -> List[str]:
"""simple docstring"""
if strength < 0 or strength > 1:
raise ValueError(F"The value of strength should in [0.0, 1.0] but is {strength}")
def _lowerCamelCase ( self : List[str] , A : Any , A : Tuple , A : Dict) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = min(int(num_inference_steps * strength) , A)
_UpperCAmelCase = max(num_inference_steps - init_timestep , 0)
_UpperCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowerCamelCase ( self : Dict , A : List[str] , A : Any , A : List[Any] , A : List[str] , A : str , A : Any=None) -> int:
"""simple docstring"""
if not isinstance(A , (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
F"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(A)}")
_UpperCAmelCase = image.to(device=A , dtype=A)
if isinstance(A , A) and len(A) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(A)}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators.")
_UpperCAmelCase = init_latents.shape
_UpperCAmelCase = randn_tensor(A , generator=A , device=A , dtype=A)
# get latents
print('add noise to latents at timestep' , A)
_UpperCAmelCase = self.scheduler.add_noise(A , A , A)
_UpperCAmelCase = init_latents
return latents
@torch.no_grad()
def __call__( self : Any , A : Union[torch.FloatTensor, PIL.Image.Image] = None , A : float = 0.8 , A : int = 1 , A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A : float = 0.0 , A : int = 50 , A : Optional[bool] = None , A : Optional[str] = "pil" , A : bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
self.check_inputs(A)
# 2. Preprocess image
_UpperCAmelCase = preprocess(A)
# 3. set timesteps
self.scheduler.set_timesteps(A , device=self.device)
_UpperCAmelCase , _UpperCAmelCase = self.get_timesteps(A , A , self.device)
_UpperCAmelCase = timesteps[:1].repeat(A)
# 4. Prepare latent variables
_UpperCAmelCase = self.prepare_latents(A , A , A , self.unet.dtype , self.device , A)
_UpperCAmelCase = latents
# 5. Denoising loop
for t in self.progress_bar(A):
# 1. predict noise model_output
_UpperCAmelCase = self.unet(A , A).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(
A , A , A , eta=A , use_clipped_model_output=A , generator=A , ).prev_sample
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1)
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(A)
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=A)
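

# --- Hedged sketch (added, not from the original file): the `strength` ->
# timestep arithmetic used by `get_timesteps` above, extracted so the
# windowing is easy to verify.
def _img2img_timestep_window(num_inference_steps: int, strength: float):
    init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    return t_start, num_inference_steps - t_start


if __name__ == "__main__":
    # strength=0.8 with 50 steps: skip the first 10 timesteps, denoise for 40.
    assert _img2img_timestep_window(50, 0.8) == (10, 40)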
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")
        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]) -> List:
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info() -> Dict:
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction: str, ground_truth: str):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction: str, ground_truth: str):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix: str) -> bool:
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
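

# --- Added usage example (illustration only, not from the original file): the
# token-overlap F1 and exact-match helpers above on toy prediction/reference pairs.
if __name__ == "__main__":
    assert exact_match_score("The Cat!", "the cat")  # articles, punctuation and case are normalized away
    # "a cat sat" vs "the cat sat down": 2 shared tokens, precision 1.0, recall 2/3 -> F1 = 0.8
    assert abs(f1_score("a cat sat", "the cat sat down") - 0.8) < 1e-9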
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Prepare qubits 0 and 1 in state |1> and return the measurement counts."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"""Total count for various states are: {counts}""")
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations."""
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"""{add(first, second) = }""")
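
# --- Added worked example: carry propagation for add(5, 3).
#   first=0b101, second=0b011 -> carry=0b001, first=0b110,  second=0b010
#   first=0b110, second=0b010 -> carry=0b010, first=0b100,  second=0b100
#   first=0b100, second=0b100 -> carry=0b100, first=0b000,  second=0b1000
#   first=0b000, second=0b1000 -> carry=0,    first=0b1000 = 8, second=0 -> done
# Note: with Python's unbounded ints this loop only terminates for
# non-negative operands.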
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class __lowerCAmelCase ( A ):
UpperCamelCase = 4_2
UpperCamelCase = 4_2
class __lowerCAmelCase ( A , A ):
UpperCamelCase = 1
@register_to_config
def __init__( self : List[str] , A : int = 20_00 , A : float = 0.1_5 , A : float = 0.0_1 , A : float = 13_48.0 , A : float = 1E-5 , A : int = 1 , ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = sigma_max
# setable values
_UpperCAmelCase = None
self.set_sigmas(A , A , A , A)
def _lowerCamelCase ( self : int , A : torch.FloatTensor , A : Optional[int] = None) -> torch.FloatTensor:
"""simple docstring"""
return sample
def _lowerCamelCase ( self : Union[str, Any] , A : int , A : float = None , A : Union[str, torch.device] = None) -> Any:
"""simple docstring"""
_UpperCAmelCase = sampling_eps if sampling_eps is not None else self.config.sampling_eps
_UpperCAmelCase = torch.linspace(1 , A , A , device=A)
def _lowerCamelCase ( self : List[Any] , A : int , A : float = None , A : float = None , A : float = None) -> str:
"""simple docstring"""
_UpperCAmelCase = sigma_min if sigma_min is not None else self.config.sigma_min
_UpperCAmelCase = sigma_max if sigma_max is not None else self.config.sigma_max
_UpperCAmelCase = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(A , A)
_UpperCAmelCase = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
_UpperCAmelCase = torch.exp(torch.linspace(math.log(A) , math.log(A) , A))
_UpperCAmelCase = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def _lowerCamelCase ( self : Dict , A : Optional[Any] , A : Union[str, Any]) -> str:
"""simple docstring"""
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device)) , self.discrete_sigmas[timesteps - 1].to(timesteps.device) , )
def _lowerCamelCase ( self : int , A : torch.FloatTensor , A : int , A : torch.FloatTensor , A : Optional[torch.Generator] = None , A : bool = True , ) -> Union[SdeVeOutput, Tuple]:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')
_UpperCAmelCase = timestep * torch.ones(
sample.shape[0] , device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
_UpperCAmelCase = (timestep * (len(self.timesteps) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
_UpperCAmelCase = timesteps.to(self.discrete_sigmas.device)
_UpperCAmelCase = self.discrete_sigmas[timesteps].to(sample.device)
_UpperCAmelCase = self.get_adjacent_sigma(A , A).to(sample.device)
_UpperCAmelCase = torch.zeros_like(A)
_UpperCAmelCase = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
_UpperCAmelCase = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
_UpperCAmelCase = diffusion.unsqueeze(-1)
_UpperCAmelCase = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
_UpperCAmelCase = randn_tensor(
sample.shape , layout=sample.layout , generator=A , device=sample.device , dtype=sample.dtype)
_UpperCAmelCase = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
_UpperCAmelCase = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=A , prev_sample_mean=A)
def _lowerCamelCase ( self : Dict , A : torch.FloatTensor , A : torch.FloatTensor , A : Optional[torch.Generator] = None , A : bool = True , ) -> Union[SchedulerOutput, Tuple]:
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
'`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
_UpperCAmelCase = randn_tensor(sample.shape , layout=sample.layout , generator=A).to(sample.device)
# compute step size from the model_output, the noise, and the snr
_UpperCAmelCase = torch.norm(model_output.reshape(model_output.shape[0] , -1) , dim=-1).mean()
_UpperCAmelCase = torch.norm(noise.reshape(noise.shape[0] , -1) , dim=-1).mean()
_UpperCAmelCase = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
_UpperCAmelCase = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
_UpperCAmelCase = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
_UpperCAmelCase = step_size.unsqueeze(-1)
_UpperCAmelCase = sample + step_size * model_output
_UpperCAmelCase = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A)
def _lowerCamelCase ( self : List[str] , A : torch.FloatTensor , A : torch.FloatTensor , A : torch.FloatTensor , ) -> torch.FloatTensor:
"""simple docstring"""
_UpperCAmelCase = timesteps.to(original_samples.device)
_UpperCAmelCase = self.discrete_sigmas.to(original_samples.device)[timesteps]
_UpperCAmelCase = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(A) * sigmas[:, None, None, None]
)
_UpperCAmelCase = noise + original_samples
return noisy_samples
def __len__( self : List[Any]) -> str:
"""simple docstring"""
return self.config.num_train_timesteps
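

# --- Hedged sketch (added, not from the original file): the Langevin corrector
# update performed by the scheduler's correction step above, written out for a
# toy score function. snr=0.15 mirrors the config default; the unit-Gaussian
# score (grad log p(x) = -x) is an assumption for illustration.
def _langevin_corrector_step(sample, score_fn, snr=0.15):
    noise = torch.randn_like(sample)
    grad = score_fn(sample)
    grad_norm = torch.norm(grad.reshape(grad.shape[0], -1), dim=-1).mean()
    noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
    step_size = (snr * noise_norm / grad_norm) ** 2 * 2
    return sample + step_size * grad + (step_size * 2) ** 0.5 * noise


if __name__ == "__main__":
    x = torch.randn(4, 3)
    x = _langevin_corrector_step(x, score_fn=lambda s: -s)  # score of a unit Gaussian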
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find the root of `function` using the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
UpperCAmelCase__ = "Create a default config file for Accelerate with only a few flags set."
def A ( _UpperCAmelCase : Optional[int]="no" , _UpperCAmelCase : str = default_json_config_file , _UpperCAmelCase : bool = False ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = Path(_UpperCAmelCase )
path.parent.mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase )
if path.exists():
print(
F"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." )
return False
_UpperCAmelCase = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" )
_UpperCAmelCase = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
_UpperCAmelCase = torch.cuda.device_count()
_UpperCAmelCase = num_gpus
_UpperCAmelCase = False
if num_gpus > 1:
_UpperCAmelCase = 'MULTI_GPU'
else:
_UpperCAmelCase = 'NO'
elif is_xpu_available() and use_xpu:
_UpperCAmelCase = torch.xpu.device_count()
_UpperCAmelCase = num_xpus
_UpperCAmelCase = False
if num_xpus > 1:
_UpperCAmelCase = 'MULTI_XPU'
else:
_UpperCAmelCase = 'NO'
elif is_npu_available():
_UpperCAmelCase = torch.npu.device_count()
_UpperCAmelCase = num_npus
_UpperCAmelCase = False
if num_npus > 1:
_UpperCAmelCase = 'MULTI_NPU'
else:
_UpperCAmelCase = 'NO'
else:
_UpperCAmelCase = 0
_UpperCAmelCase = True
_UpperCAmelCase = 1
_UpperCAmelCase = 'NO'
_UpperCAmelCase = ClusterConfig(**_UpperCAmelCase )
config.to_json_file(_UpperCAmelCase )
return path
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = parser.add_parser('default' , parents=_UpperCAmelCase , help=_UpperCAmelCase , formatter_class=_UpperCAmelCase )
parser.add_argument(
'--config_file' , default=_UpperCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=_UpperCAmelCase , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=_UpperCAmelCase )
return parser
def A ( _UpperCAmelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F"accelerate configuration saved at {config_file}" )
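

# --- Hedged example (added, not from the original file): on a CPU-only machine
# the branch above ends up writing a config roughly equivalent to the dict
# below; the exact field names come from the ClusterConfig dataclass and are
# assumptions here.
if __name__ == "__main__":
    import json

    example_cpu_config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": "no",
        "distributed_type": "NO",
        "num_processes": 1,
        "use_cpu": True,
    }
    print(json.dumps(example_cpu_config, indent=2))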
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCAmelCase__ = logging.get_logger(__name__)
class __lowerCAmelCase ( A ):
UpperCamelCase = ['''pixel_values''']
def __init__( self : Any , A : bool = True , A : Optional[Dict[str, int]] = None , A : PILImageResampling = PILImageResampling.BILINEAR , A : bool = True , A : Dict[str, int] = None , A : bool = True , A : Union[int, float] = 1 / 2_55 , A : bool = True , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , **A : Union[str, Any] , ) -> None:
"""simple docstring"""
super().__init__(**A)
_UpperCAmelCase = size if size is not None else {'shortest_edge': 2_56}
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowerCamelCase ( self : List[str] , A : np.ndarray , A : Dict[str, int] , A : PILImageResampling = PILImageResampling.BICUBIC , A : Optional[Union[str, ChannelDimension]] = None , **A : List[str] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
_UpperCAmelCase = get_resize_output_image_size(A , size=size['shortest_edge'] , default_to_square=A)
return resize(A , size=A , resample=A , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : Dict[str, int] , A : Optional[Union[str, ChannelDimension]] = None , **A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(A)
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(A , size=(size['height'], size['width']) , data_format=A , **A)
def _lowerCamelCase ( self : Any , A : np.ndarray , A : float , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict) -> np.ndarray:
"""simple docstring"""
return rescale(A , scale=A , data_format=A , **A)
def _lowerCamelCase ( self : int , A : np.ndarray , A : Union[float, List[float]] , A : Union[float, List[float]] , A : Optional[Union[str, ChannelDimension]] = None , **A : Dict , ) -> np.ndarray:
"""simple docstring"""
return normalize(A , mean=A , std=A , data_format=A , **A)
def _lowerCamelCase ( self : Union[str, Any] , A : ImageInput , A : Optional[bool] = None , A : Dict[str, int] = None , A : PILImageResampling = None , A : bool = None , A : Dict[str, int] = None , A : Optional[bool] = None , A : Optional[float] = None , A : Optional[bool] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[float, List[float]]] = None , A : Optional[Union[str, TensorType]] = None , A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A : int , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(A , default_to_square=A)
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(A , param_name='crop_size')
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(A)
if not valid_images(A):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(A) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=A , size=A , resample=A) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=A , size=A) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=A , scale=A) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=A , mean=A , std=A) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(A , A) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=A , tensor_type=A)
def _lowerCamelCase ( self : str , A : Any , A : List[Tuple] = None) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A) != len(A):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(A):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(A)):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=A)
_UpperCAmelCase = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(A)
else:
_UpperCAmelCase = logits.argmax(dim=1)
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
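

# --- Hedged sketch (added, not from the original file): the core of the
# semantic-segmentation post-processing above on dummy logits; upsample to the
# target size, then argmax over the class dimension. This demo assumes torch
# is installed (the module only imports it when available).
if __name__ == "__main__":
    logits = torch.randn(1, 21, 32, 32)  # (batch, num_classes, h, w)
    resized = torch.nn.functional.interpolate(
        logits, size=(128, 128), mode="bilinear", align_corners=False
    )
    seg_map = resized[0].argmax(dim=0)  # (128, 128) map of class indices
    assert seg_map.shape == (128, 128)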
def solution(length: int = 50) -> int:
    """Count the tilings of a row of `length` unit squares using gray squares
    and colored tiles of length 2, 3 or 4."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
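

# --- Added note: the triple loop above is equivalent to the linear recurrence
#   w(n) = w(n-1) + w(n-2) + w(n-3) + w(n-4),  w(0) = 1,  w(n) = 0 for n < 0,
# since conditioning on the leftmost colored tile (length 2-4) or leading gray
# square fixes the rest of the row. E.g. w(5) = 8 + 4 + 2 + 1 = 15.
assert solution(5) == 15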
if __name__ == "__main__":
print(f"""{solution() = }""")
import unittest

from knapsack import knapsack as k


class TestClass(unittest.TestCase):
    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
| 639
| 0
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
UpperCAmelCase__ = "\\n\n"
UpperCAmelCase__ = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
UpperCAmelCase__ = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
def _lowerCamelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string'),
}) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
def _lowerCamelCase ( self : Optional[Any] , A : Tuple , A : Union[str, Any] , A : int = 16 , A : bool = True , A : str=None) -> Tuple:
"""simple docstring"""
if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
_UpperCAmelCase = 'cuda'
else:
_UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(A)
_UpperCAmelCase = model.to(A)
_UpperCAmelCase = AutoTokenizer.from_pretrained(A)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values())
# check that the model already has at least one special token defined
assert (
len(A) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]})
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_UpperCAmelCase = model.config.max_length - 1
else:
_UpperCAmelCase = model.config.max_length
_UpperCAmelCase = tokenizer(
A , add_special_tokens=A , padding=A , truncation=A , max_length=A , return_tensors='pt' , return_attention_mask=A , ).to(A)
_UpperCAmelCase = encodings['input_ids']
_UpperCAmelCase = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1) , 1)), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1) , 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_UpperCAmelCase = []
_UpperCAmelCase = CrossEntropyLoss(reduction='none')
for start_index in logging.tqdm(range(0 , len(A) , A)):
_UpperCAmelCase = min(start_index + batch_size , len(A))
_UpperCAmelCase = encoded_texts[start_index:end_index]
_UpperCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
_UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(A)
_UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1)
_UpperCAmelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa).to(A), attn_mask] , dim=1)
_UpperCAmelCase = encoded_batch
with torch.no_grad():
_UpperCAmelCase = model(A , attention_mask=A).logits
_UpperCAmelCase = out_logits[..., :-1, :].contiguous()
_UpperCAmelCase = labels[..., 1:].contiguous()
_UpperCAmelCase = attn_mask[..., 1:].contiguous()
_UpperCAmelCase = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2) , A) * shift_attention_mask_batch).sum(1)
/ shift_attention_mask_batch.sum(1))
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(A)}
| 705
|
import qiskit
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
_UpperCAmelCase = qiskit.Aer.get_backend('aer_simulator' )
# Create a Quantum Circuit acting on the q register
_UpperCAmelCase = qiskit.QuantumCircuit(_UpperCAmelCase , _UpperCAmelCase )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
_UpperCAmelCase = qiskit.execute(_UpperCAmelCase , _UpperCAmelCase , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 639
| 0
|
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = CLIPTokenizer
UpperCamelCase = CLIPTokenizerFast
UpperCamelCase = True
UpperCamelCase = {}
UpperCamelCase = False
def _lowerCamelCase ( self : int) -> List[str]:
"""simple docstring"""
super().setUp()
# fmt: off
_UpperCAmelCase = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
_UpperCAmelCase = dict(zip(A , range(len(A))))
_UpperCAmelCase = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
_UpperCAmelCase = {'unk_token': '<unk>'}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(A) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(A))
def _lowerCamelCase ( self : Dict , **A : Any) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : str , **A : List[Any]) -> List[str]:
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : Optional[Any] , A : int) -> str:
"""simple docstring"""
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = 'lower newer'
return input_text, output_text
def _lowerCamelCase ( self : int) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
_UpperCAmelCase = tokenizer.tokenize(A)
self.assertListEqual(A , A)
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A) , A)
@require_ftfy
def _lowerCamelCase ( self : List[Any]) -> str:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
_UpperCAmelCase = self.tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(A , **A)
_UpperCAmelCase = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
_UpperCAmelCase = tokenizer_s.tokenize(A)
_UpperCAmelCase = tokenizer_r.tokenize(A)
self.assertListEqual(A , A)
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_UpperCAmelCase = 'xa\u0303y' + ' ' + 'x\xe3y'
_UpperCAmelCase = tokenizer_s.tokenize(A)
_UpperCAmelCase = tokenizer_r.tokenize(A)
self.assertListEqual(A , A)
# Test that the tokenization is identical on unicode of space type
_UpperCAmelCase = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
            '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_UpperCAmelCase = tokenizer_s.tokenize(A)
_UpperCAmelCase = tokenizer_r.tokenize(A)
self.assertListEqual(A , A)
# Test that the tokenization is identical on unicode of line break type
_UpperCAmelCase = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_UpperCAmelCase = tokenizer_s.tokenize(A)
_UpperCAmelCase = tokenizer_r.tokenize(A)
self.assertListEqual(A , A)
def _lowerCamelCase ( self : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
_UpperCAmelCase = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
_UpperCAmelCase = F"{text_of_1_token} {text_of_1_token}"
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , )
_UpperCAmelCase = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A)
self.assertEqual(encoding.offset_mapping[0] , (0, len(A)))
self.assertEqual(
encoding.offset_mapping[1] , (len(A) + 1, len(A) + 1 + len(A)) , )
_UpperCAmelCase = F" {text}"
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , )
_UpperCAmelCase = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A)
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A)))
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A) + 1, 1 + len(A) + 1 + len(A)) , )
def _lowerCamelCase ( self : Any) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(A) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.'))
@require_ftfy
def _lowerCamelCase ( self : List[str]) -> List[Any]:
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
pass
| 706
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] ) -> int:
'''simple docstring'''
# Initialise PyTorch model
_UpperCAmelCase = TaConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
_UpperCAmelCase = TaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
UpperCAmelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
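# Hypothetical invocation (script name and paths are placeholders, not real checkpoints):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/pytorch_model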
| 639
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
def __init__( self : Dict , A : List[Any] , A : str=7 , A : Tuple=3 , A : Dict=18 , A : int=30 , A : Optional[Any]=4_00 , A : int=True , A : List[Any]=None , A : int=True , ) -> int:
"""simple docstring"""
_UpperCAmelCase = size if size is not None else {'height': 18, 'width': 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = apply_ocr
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __lowerCAmelCase ( A , unittest.TestCase ):
UpperCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
_UpperCAmelCase = LayoutLMvaImageProcessingTester(self)
@property
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(A , 'do_resize'))
self.assertTrue(hasattr(A , 'size'))
self.assertTrue(hasattr(A , 'apply_ocr'))
def _lowerCamelCase ( self : Union[str, Any]) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'height': 18, 'width': 18})
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42)
self.assertEqual(image_processor.size , {'height': 42, 'width': 42})
def _lowerCamelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A)
for image in image_inputs:
self.assertIsInstance(A , Image.Image)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt')
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A)
self.assertIsInstance(encoding.boxes , A)
# Test batched
_UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A)
for image in image_inputs:
self.assertIsInstance(A , np.ndarray)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A)
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor)
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _lowerCamelCase ( self : List[Any]) -> int:
"""simple docstring"""
_UpperCAmelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
_UpperCAmelCase = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test')
_UpperCAmelCase = Image.open(ds[0]['file']).convert('RGB')
_UpperCAmelCase = image_processing(A , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24))
self.assertEqual(len(encoding.words) , len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_UpperCAmelCase = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_UpperCAmelCase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 
6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A)
self.assertListEqual(encoding.boxes , A)
# with apply_OCR = False
_UpperCAmelCase = LayoutLMvaImageProcessor(apply_ocr=A)
_UpperCAmelCase = image_processing(A , return_tensors='pt')
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24))
| 707
|
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 639
| 0
|
def A ( _UpperCAmelCase : list , _UpperCAmelCase : int = 0 ) -> list:
'''simple docstring'''
_UpperCAmelCase = length or len(_UpperCAmelCase )
_UpperCAmelCase = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
_UpperCAmelCase , _UpperCAmelCase = list_data[i + 1], list_data[i]
_UpperCAmelCase = True
return list_data if not swapped else bubble_sort(_UpperCAmelCase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
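# Example (by hand): bubble_sort([5, 2, 9, 1]) bubbles the largest remaining element to the
# end of the shrinking prefix on each recursive pass and returns [1, 2, 5, 9]; an already
# sorted input returns immediately because no swap occurs on the first pass.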
| 708
|
import os
# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase__ = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def A ( ) -> List[str]:
'''simple docstring'''
    _UpperCAmelCase = os.path.dirname(os.path.realpath(__file__ ) )
_UpperCAmelCase = os.path.join(_UpperCAmelCase , 'words.txt' )
_UpperCAmelCase = ''
with open(_UpperCAmelCase ) as f:
_UpperCAmelCase = f.readline()
_UpperCAmelCase = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
_UpperCAmelCase = [
word
        for word in [sum(ord(x ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(_UpperCAmelCase )
if __name__ == "__main__":
print(solution())
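# Example word value: 'SKY' -> 19 + 11 + 25 = 55, which is the 10th triangular number,
# so 'SKY' counts toward the total returned by solution().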
| 639
| 0
|
from __future__ import annotations
UpperCAmelCase__ = tuple[int, int, int]
UpperCAmelCase__ = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
UpperCAmelCase__ = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
UpperCAmelCase__ = "EGZWVONAHDCLFQMSIPJBYUKXTR"
UpperCAmelCase__ = "FOBHMDKEXQNRAULPGSJVTYICZW"
UpperCAmelCase__ = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
UpperCAmelCase__ = {
"A": "N",
"N": "A",
"B": "O",
"O": "B",
"C": "P",
"P": "C",
"D": "Q",
"Q": "D",
"E": "R",
"R": "E",
"F": "S",
"S": "F",
"G": "T",
"T": "G",
"H": "U",
"U": "H",
"I": "V",
"V": "I",
"J": "W",
"W": "J",
"K": "X",
"X": "K",
"L": "Y",
"Y": "L",
"M": "Z",
"Z": "M",
}
# -------------------------- extra rotors --------------------------
UpperCAmelCase__ = "RMDJXFUWGISLHVTCQNKYPBEZOA"
UpperCAmelCase__ = "SGLCPQWZHKXAREONTFBVIYJUDM"
UpperCAmelCase__ = "HVSICLTYKQUBXDWAJZOMFGPREN"
UpperCAmelCase__ = "RZWQHFMVDBKICJLNTUXAGYPSOE"
UpperCAmelCase__ = "LFKIJODBEGAMQPXVUHYSTCZRWN"
UpperCAmelCase__ = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def A ( _UpperCAmelCase : RotorPositionT , _UpperCAmelCase : RotorSelectionT , _UpperCAmelCase : str ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
'''simple docstring'''
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(_UpperCAmelCase ) )) < 3:
_UpperCAmelCase = F"Please use 3 unique rotors (not {unique_rotsel})"
raise Exception(_UpperCAmelCase )
# Checks if rotor positions are valid
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rotpos
if not 0 < rotorposa <= len(_UpperCAmelCase ):
_UpperCAmelCase = F"First rotor position is not within range of 1..26 ({rotorposa}"
raise ValueError(_UpperCAmelCase )
if not 0 < rotorposa <= len(_UpperCAmelCase ):
_UpperCAmelCase = F"Second rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(_UpperCAmelCase )
if not 0 < rotorposa <= len(_UpperCAmelCase ):
_UpperCAmelCase = F"Third rotor position is not within range of 1..26 ({rotorposa})"
raise ValueError(_UpperCAmelCase )
# Validates string and returns dict
_UpperCAmelCase = _plugboard(_UpperCAmelCase )
return rotpos, rotsel, pbdict
def A ( _UpperCAmelCase : str ) -> dict[str, str]:
'''simple docstring'''
# tests the input string if it
# a) is type string
# b) has even length (so pairs can be made)
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = F"Plugboard setting isn't type string ({type(_UpperCAmelCase )})"
raise TypeError(_UpperCAmelCase )
elif len(_UpperCAmelCase ) % 2 != 0:
_UpperCAmelCase = F"Odd number of symbols ({len(_UpperCAmelCase )})"
raise Exception(_UpperCAmelCase )
elif pbstring == "":
return {}
    _UpperCAmelCase = pbstring.replace(' ' , '' )  # reassign so the stripped string is actually used
# Checks if all characters are unique
_UpperCAmelCase = set()
for i in pbstring:
if i not in abc:
_UpperCAmelCase = F"'{i}' not in list of symbols"
raise Exception(_UpperCAmelCase )
elif i in tmppbl:
_UpperCAmelCase = F"Duplicate symbol ({i})"
raise Exception(_UpperCAmelCase )
else:
tmppbl.add(_UpperCAmelCase )
del tmppbl
# Created the dictionary
_UpperCAmelCase = {}
for j in range(0 , len(_UpperCAmelCase ) - 1 , 2 ):
_UpperCAmelCase = pbstring[j + 1]
_UpperCAmelCase = pbstring[j]
return pb
def A ( _UpperCAmelCase : str , _UpperCAmelCase : RotorPositionT , _UpperCAmelCase : RotorSelectionT = (rotora, rotora, rotora) , _UpperCAmelCase : str = "" , ) -> str:
'''simple docstring'''
_UpperCAmelCase = text.upper()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = _validator(
_UpperCAmelCase , _UpperCAmelCase , plugb.upper() )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rotor_position
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
_UpperCAmelCase = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
_UpperCAmelCase = plugboard[symbol]
# rotor ra --------------------------
_UpperCAmelCase = abc.index(_UpperCAmelCase ) + rotorposa
_UpperCAmelCase = rotora[index % len(_UpperCAmelCase )]
# rotor rb --------------------------
_UpperCAmelCase = abc.index(_UpperCAmelCase ) + rotorposa
_UpperCAmelCase = rotora[index % len(_UpperCAmelCase )]
# rotor rc --------------------------
_UpperCAmelCase = abc.index(_UpperCAmelCase ) + rotorposa
_UpperCAmelCase = rotora[index % len(_UpperCAmelCase )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
_UpperCAmelCase = reflector[symbol]
# 2nd rotors
_UpperCAmelCase = abc[rotora.index(_UpperCAmelCase ) - rotorposa]
_UpperCAmelCase = abc[rotora.index(_UpperCAmelCase ) - rotorposa]
_UpperCAmelCase = abc[rotora.index(_UpperCAmelCase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
_UpperCAmelCase = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
_UpperCAmelCase = 0
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
_UpperCAmelCase = 0
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
_UpperCAmelCase = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase__ = "This is my Python script that emulates the Enigma machine from WWII."
UpperCAmelCase__ = (1, 1, 1)
UpperCAmelCase__ = "pictures"
UpperCAmelCase__ = (rotora, rotora, rotora)
UpperCAmelCase__ = enigma(message, rotor_pos, rotor_sel, pb)
print("Encrypted message:", en)
print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
| 709
|
def A ( _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = str(bin(_UpperCAmelCase ) )[2:] # remove the leading "0b"
_UpperCAmelCase = max(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(_UpperCAmelCase ) , b_binary.zfill(_UpperCAmelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
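# Worked example: binary_xor(25, 32) -> bin strings '11001' and '100000' are zero-filled
# to width 6 as '011001' and '100000', whose bitwise XOR yields '0b111001'.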
| 639
| 0
|
from maths.prime_factors import prime_factors
def A ( _UpperCAmelCase : int ) -> int:
'''simple docstring'''
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
_UpperCAmelCase = F"Input value of [number={number}] must be an integer"
raise TypeError(_UpperCAmelCase )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(_UpperCAmelCase ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
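# Behaviour as implemented (assuming maths.prime_factors returns factors with multiplicity):
# mobius(4) sees prime_factors(4) == [2, 2], an even count, and returns 1.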
| 710
|
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
UpperCAmelCase__ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 639
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
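# Hedged note: in the original lazy-import pattern this assignment targets
# sys.modules[__name__], so `from transformers.models.funnel import FunnelModel`
# resolves the heavy submodule imports only on first attribute access.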
| 711
|
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={
'''help''': (
            '''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(A )} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def _lowerCamelCase ( self : Any) -> Dict:
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path')
@dataclass
class __lowerCAmelCase :
UpperCamelCase = field(
default=A , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(default=A , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
UpperCamelCase = field(
default=A , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase = field(
default=A , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def _lowerCamelCase ( self : Dict) -> Union[str, Any]:
"""simple docstring"""
if self.train_file is not None:
_UpperCAmelCase = self.train_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_UpperCAmelCase = self.validation_file.split('.')[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A ( _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
with open(_UpperCAmelCase , 'r' , encoding='utf-8' ) as f:
_UpperCAmelCase = [json.loads(_UpperCAmelCase ) for line in f.read().splitlines() if (len(_UpperCAmelCase ) > 0 and not line.isspace())]
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
_UpperCAmelCase = {c: dataset[c] for c in dataset.column_names}
_UpperCAmelCase = refs
return Dataset.from_dict(_UpperCAmelCase )
def A ( ) -> Optional[Any]:
'''simple docstring'''
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s' , _UpperCAmelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[:{data_args.validation_split_percentage}%]" , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"train[{data_args.validation_split_percentage}%:]" , )
else:
_UpperCAmelCase = {}
if data_args.train_file is not None:
_UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase = data_args.validation_file
_UpperCAmelCase = data_args.train_file.split('.' )[-1]
if extension == "txt":
_UpperCAmelCase = 'text'
_UpperCAmelCase = load_dataset(_UpperCAmelCase , data_files=_UpperCAmelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(F"New config: {config}" )
_UpperCAmelCase = {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **_UpperCAmelCase )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **_UpperCAmelCase )
else:
raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
_UpperCAmelCase = AutoModelForMaskedLM.from_config(_UpperCAmelCase )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets['train'].column_names
else:
_UpperCAmelCase = datasets['validation'].column_names
_UpperCAmelCase = 'text' if 'text' in column_names else column_names[0]
_UpperCAmelCase = 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_UpperCAmelCase : str ):
# Remove empty lines
_UpperCAmelCase = [line for line in examples['text'] if len(_UpperCAmelCase ) > 0 and not line.isspace()]
return tokenizer(examples['text'] , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=data_args.max_seq_length )
_UpperCAmelCase = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets['train'] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets['validation'] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
_UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=_UpperCAmelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=_UpperCAmelCase , args=_UpperCAmelCase , train_dataset=tokenized_datasets['train'] if training_args.do_train else None , eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None , tokenizer=_UpperCAmelCase , data_collator=_UpperCAmelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = trainer.train(resume_from_checkpoint=_UpperCAmelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , 'train_results.txt' )
if trainer.is_world_process_zero():
with open(_UpperCAmelCase , 'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F" {key} = {value}" )
writer.write(F"{key} = {value}\n" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['eval_loss'] )
        results['perplexity'] = perplexity
        output_eval_file = os.path.join(training_args.output_dir , 'eval_results_mlm_wwm.txt' )
        if trainer.is_world_process_zero():
            with open(output_eval_file , 'w' ) as writer:
                logger.info('***** Eval results *****' )
                for key, value in sorted(results.items() ):
                    logger.info(F" {key} = {value}" )
                    writer.write(F"{key} = {value}\n" )
return results
def _mp_fn(index ) -> Optional[Any]:
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
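    # Sketch (added for clarity; not part of the original script) of what the
    # whole-word-mask collator wired up above produces. AutoTokenizer and
    # DataCollatorForWholeWordMask are real transformers APIs; the sentence is
    # an arbitrary toy input:
    #   tok = AutoTokenizer.from_pretrained("bert-base-chinese")
    #   collator = DataCollatorForWholeWordMask(tokenizer=tok, mlm_probability=0.15)
    #   batch = collator([tok("今天天气很好")])
    #   # batch["input_ids"] has whole words replaced by [MASK]; batch["labels"]
    #   # is -100 everywhere except at the masked positions.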
from math import factorial
def solution(n: int = 20 ) -> int:
    '''simple docstring'''
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCAmelCase__ = None
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase__ = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
UpperCAmelCase__ = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
UpperCAmelCase__ = "▁"
# Segments (not really needed)
UpperCAmelCase__ = 0
UpperCAmelCase__ = 1
UpperCAmelCase__ = 2
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    slow_tokenizer_class = XLNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ) -> None:
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_b + sep + cls
    def create_token_type_ids_from_sequences( self , token_ids_a: List[int] , token_ids_b: Optional[List[int]] = None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_b is None:
            return len(token_ids_a + sep) * [0] + cls_segment_id
        return len(token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file , out_vocab_file)
        return (out_vocab_file,)
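# A minimal usage sketch (added; not part of the original module). XLNet puts
# its special tokens at the end, so a single sequence is encoded as
# "A <sep> <cls>"; the guard keeps this from running on import.
if __name__ == "__main__":
    tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
    ids = tok.build_inputs_with_special_tokens([10, 11])
    assert ids == [10, 11, tok.sep_token_id, tok.cls_token_id]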
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ) -> List[Any]:
    '''simple docstring'''
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch(input_ids , pad_token_id , attention_mask=None , ) -> Tuple:
    '''simple docstring'''
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
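# Illustration (added): trim_batch drops columns that are padding in every row.
# With pad_token_id=0 (an arbitrary choice for the demo):
#   trim_batch(torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]]), pad_token_id=0)
#   -> tensor([[5, 6], [7, 0]])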
class Seq2SeqDataset(Dataset ):
    def __init__( self , tokenizer , data_dir , max_source_length , max_target_length , type_path="train" , n_obs=None , src_lang=None , tgt_lang=None , prefix="" , ) -> None:
        """simple docstring"""
        super().__init__()
        self.src_file = Path(data_dir ).joinpath(type_path + '.source')
        self.tgt_file = Path(data_dir ).joinpath(type_path + '.target')
        self.src_lens = self.get_char_lens(self.src_file )
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens ) > 0, F"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
    def __len__( self ) -> int:
        """simple docstring"""
        return len(self.src_lens )
    def __getitem__( self , index ) -> Dict[str, torch.Tensor]:
        """simple docstring"""
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file ) , index ).rstrip('\n')
        tgt_line = linecache.getline(str(self.tgt_file ) , index ).rstrip('\n')
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , T5Tokenizer ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer , RagTokenizer ) else self.tokenizer
        source_inputs = encode_line(source_tokenizer , source_line , self.max_source_length , 'right')
        target_inputs = encode_line(target_tokenizer , tgt_line , self.max_target_length , 'right')
        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    def get_char_lens(data_file ) -> Tuple:
        """simple docstring"""
        return [len(x ) for x in Path(data_file ).open().readlines()]
    def collate_fn( self , batch ) -> Dict[str, torch.Tensor]:
        """simple docstring"""
        input_ids = torch.stack([x['input_ids'] for x in batch] )
        masks = torch.stack([x['attention_mask'] for x in batch] )
        target_ids = torch.stack([x['decoder_input_ids'] for x in batch] )
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , RagTokenizer )
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids , tgt_pad_token_id )
        source_ids , source_mask = trim_batch(input_ids , src_pad_token_id , attention_mask=masks )
        batch = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List] ) -> List:
    '''simple docstring'''
    return list(itertools.chain.from_iterable(summary_ids ) )
def save_git_info(folder_path: str ) -> None:
    '''simple docstring'''
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , 'git_log.json' ) )
def save_json(content , path , indent=4 , **json_dump_kwargs ) -> None:
    '''simple docstring'''
    with open(path , 'w' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def load_json(path ) -> Optional[Any]:
    '''simple docstring'''
    with open(path ) as f:
        return json.load(f )
def get_git_info() -> dict:
    '''simple docstring'''
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def lmap(f: Callable , x: Iterable ) -> List:
    '''simple docstring'''
    return list(map(f , x ) )
def pickle_save(obj , path ) -> Union[str, Any]:
    '''simple docstring'''
    with open(path , 'wb' ) as f:
        return pickle.dump(obj , f )
def normalize_answer(s: str ) -> str:
    '''simple docstring'''
    def remove_articles(text ):
        return re.sub(R'\b(a|an|the)\b' , ' ' , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def f1_score(prediction: str , ground_truth: str ) -> Union[str, Any]:
    '''simple docstring'''
    prediction_tokens = normalize_answer(prediction ).split()
    ground_truth_tokens = normalize_answer(ground_truth ).split()
    common = Counter(prediction_tokens ) & Counter(ground_truth_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens )
    recall = 1.0 * num_same / len(ground_truth_tokens )
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
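# Worked example (added): normalize_answer strips articles and punctuation, so
# f1_score("a cat sat", "the cat sat!") compares ["cat", "sat"] against
# ["cat", "sat"] and returns 1.0.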
def exact_match_score(prediction , ground_truth ) -> List[Any]:
    '''simple docstring'''
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def calculate_exact_match(output_lns: List[str] , reference_lns: List[str] ) -> Dict:
    '''simple docstring'''
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def is_rag_model(model_prefix ) -> int:
    '''simple docstring'''
    return model_prefix.startswith('rag' )
def set_extra_model_params(extra_params , hparams , config ) -> Union[str, Any]:
    '''simple docstring'''
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["YolosFeatureExtractor"]
UpperCAmelCase__ = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def jaro_winkler(str1: str , str2: str ) -> float:
    '''simple docstring'''
    def get_matched_characters(_str1: str , _str2: str ) -> str:
        matched = []
        limit = min(len(_str1 ) , len(_str2 ) ) // 2
        for i, l in enumerate(_str1 ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_str2 ) ) )
            if l in _str2[left:right]:
                matched.append(l )
                # blank out the matched character so it is not matched twice
                _str2 = F"{_str2[0:_str2.index(l )]} {_str2[_str2.index(l ) + 1:]}"
        return "".join(matched )
    # matching characters
    matching_1 = get_matched_characters(str1 , str2 )
    matching_2 = get_matched_characters(str2 , str1 )
    match_count = len(matching_1 )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_1 , matching_2 ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1 )
                + match_count / len(str2 )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(str1[:4] , str2[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
UpperCAmelCase__ = re.compile(r"\s+")
def A ( _UpperCAmelCase : Tuple ) -> str:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(_UpperCAmelCase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def line_stats(example ) -> Optional[int]:
    '''simple docstring'''
    line_lengths = [len(line ) for line in example['content'].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats(example ) -> Optional[Any]:
    '''simple docstring'''
    alpha_frac = np.mean([c.isalnum() for c in example['content']] )
    return {"alpha_frac": alpha_frac}
def check_uniques(example , uniques ) -> Dict:
    '''simple docstring'''
    if example["hash"] in uniques:
        uniques.remove(example['hash'] )
        return True
    else:
        return False
def is_autogenerated(example , scan_width=5 ) -> Optional[Any]:
    '''simple docstring'''
    keywords = ['auto-generated', 'autogenerated', 'automatically generated']
    lines = example['content'].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example , scan_width=5 , coeff=0.05 ) -> Tuple:
    '''simple docstring'''
    keywords = ['unit tests', 'test file', 'configuration file']
    lines = example['content'].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example['content'].count('\n' )
    threshold = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count('config' )
        count_test += line.lower().count('test' )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example ) -> Tuple:
    '''simple docstring'''
    keywords = ['def ', 'class ', 'for ', 'while ']
    lines = example['content'].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example , minimum=4 ) -> Dict:
    '''simple docstring'''
    lines = example['content'].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count('=' )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example ) -> str:
    '''simple docstring'''
    input_ids = tokenizer(example['content'] , truncation=False )['input_ids']
    ratio = len(example['content'] ) / len(input_ids )
    return {"ratio": ratio}
def preprocess(example ) -> Optional[Any]:
    '''simple docstring'''
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter(example , uniques , args ) -> Any:
    '''simple docstring'''
    if not check_uniques(example , uniques ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path ) -> Any:
    '''simple docstring'''
    with open(file_path , 'rb' ) as f_in:
        with gzip.open(str(file_path ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
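# Usage sketch (added; the path is hypothetical): compress_file("data/file.json")
# leaves only "data/file.json.gz" on disk, since the source file is unlinked.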
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
    print(f"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)
data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"""file-{file_number+1:012}.json""")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
def mf_knapsack(i , wt , val , j ) -> str:
    '''simple docstring'''
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1 , wt , val , j )
        else:
            val = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = val
    return f[i][j]
def knapsack(w , wt , val , n ) -> int:
    '''simple docstring'''
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w: int , wt: list , val: list ) -> Any:
    '''simple docstring'''
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples' )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            F"But got {num_items} weights and {len(val )} values"
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                'All weights must be integers but got weight of '
                F"type {type(wt[i] )} at index {i}"
            )
            raise TypeError(msg )
    optimal_val , dp_table = knapsack(w , wt , val , num_items )
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution(dp: list , wt: list , i: int , j: int , optimal_set: set ) -> str:
    '''simple docstring'''
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
UpperCAmelCase__ = [3, 2, 4, 4]
UpperCAmelCase__ = [4, 3, 2, 3]
UpperCAmelCase__ = 4
UpperCAmelCase__ = 6
UpperCAmelCase__ = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
UpperCAmelCase__ , UpperCAmelCase__ = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
UpperCAmelCase__ , UpperCAmelCase__ = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = "https://openaipublic.azureedge.net/jukebox/models/"
UpperCAmelCase__ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key ) -> Tuple:
    '''simple docstring'''
    if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.1.bias' , '.conv1d_1.bias' )
    elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.1.weight' , '.conv1d_1.weight' )
    elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.3.bias' , '.conv1d_2.bias' )
    elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
        key = key.replace('.model.3.weight' , '.conv1d_2.weight' )
    if "conditioner_blocks.0." in key:
        key = key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
    if "prime_prior" in key:
        key = key.replace('prime_prior' , 'encoder' )
    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace('.emb.' , '.' )
    if key.endswith('k' ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace('.k' , '.codebook' )
    if "y_emb." in key:
        return key.replace('y_emb.' , 'metadata_embedding.' )
    if "x_emb.emb." in key:
        key = key.replace('0.x_emb.emb' , 'embed_tokens' )
    if "prime_state_ln" in key:
        return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
    if ".ln" in key:
        return key.replace('.ln' , '.layer_norm' )
    if "_ln" in key:
        return key.replace('_ln' , '_layer_norm' )
    if "prime_state_proj" in key:
        return key.replace('prime_state_proj' , 'encoder.proj_in' )
    if "prime_x_out" in key:
        return key.replace('prime_x_out' , 'encoder.lm_head' )
    if "prior.x_out" in key:
        return key.replace('x_out' , 'fc_proj_out' )
    if "x_emb" in key:
        return key.replace('x_emb' , 'embed_tokens' )
    return key
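# Illustrative mappings implied by the rules above (added):
#   replace_key("prior.x_out.weight")  -> "prior.fc_proj_out.weight"
#   replace_key("prime_state_ln.bias") -> "encoder.final_layer_norm.bias"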
def fix_jukebox_keys(state_dict , model_state_dict , key_prefix , mapping ) -> Tuple:
    '''simple docstring'''
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
    re_encoder_block_resnet = re.compile(
        R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_encoder_block_proj_out = re.compile(R'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
    re_decoder_block_conv_out = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
    re_decoder_block_resnet = re.compile(
        R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_decoder_block_proj_in = re.compile(R'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
    re_prior_cond_conv_out = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
    re_prior_cond_resnet = re.compile(
        R'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
    re_prior_cond_proj_in = re.compile(R'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if F"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(F"failed converting {original_key} to {key}, does not match" )
        # handle missmatched shape
        elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
            val = model_state_dict[F"{key_prefix}.{key}"]
            print(F"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None , pytorch_dump_folder_path=None ) -> Dict:
    '''simple docstring'''
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
            r = requests.get(F"{PREFIX}{file}" , allow_redirects=True )
            os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=True )
            open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , 'wb' ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split('/' )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['model']
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('.b' ):
                new_dic[k.replace('b' , 'bias' )] = old_dic[k]
            elif k.endswith('.w' ):
                new_dic[k.replace('w' , 'weight' )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('.blocks.' , '.model.' )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = 'vqvae' if i == 0 else F"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(F"{pytorch_dump_folder_path}/mapping.json" , 'w' ) as txtfile:
        json.dump(mapping , txtfile )
    print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
UpperCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
    AutoencoderKL,
    EulerDiscreteScheduler,
    StableDiffusionLatentUpscalePipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list ) -> Dict:
    '''simple docstring'''
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:] )
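# e.g. (added) check_same_shape([torch.zeros(1, 3), torch.ones(1, 3)]) -> True,
# while mixing a (1, 3) and a (2, 3) tensor returns False.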
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'height',
        'width',
        'cross_attention_kwargs',
        'negative_prompt_embeds',
        'prompt_embeds',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([] )
    test_cpu_offload = True
    @property
    def dummy_image( self ) -> Any:
        """simple docstring"""
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components( self ) -> List[str]:
        """simple docstring"""
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn='gelu' , attention_head_dim=8 , norm_num_groups=None , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=1_60 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
                'KDownBlock2D',
                'KCrossAttnDownBlock2D',
                'KCrossAttnDownBlock2D',
                'KCrossAttnDownBlock2D',
            ) , in_channels=8 , mid_block_type=None , only_cross_attention=False , out_channels=5 , resnet_time_scale_shift='scale_shift' , time_embedding_type='fourier' , timestep_post_act='gelu' , up_block_types=('KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KCrossAttnUpBlock2D', 'KUpBlock2D') , )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
                'DownEncoderBlock2D',
            ] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        scheduler = EulerDiscreteScheduler(prediction_type='sample')
        text_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='quick_gelu' , projection_dim=5_12 , )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': model.eval(),
            'vae': vae.eval(),
            'scheduler': scheduler,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs( self , device , seed=0) -> List[str]:
        """simple docstring"""
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': self.dummy_image.cpu(),
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference( self ) -> str:
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 2_56, 2_56, 3))
        expected_slice = np.array(
            [0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff , 1E-3)
    def test_attention_slicing_forward_pass( self ) -> Any:
        """simple docstring"""
        super().test_attention_slicing_forward_pass(expected_max_diff=7E-3)
    def test_cpu_offload_forward_pass( self ) -> Tuple:
        """simple docstring"""
        super().test_cpu_offload_forward_pass(expected_max_diff=3E-3)
    def test_dict_tuple_outputs_equivalent( self ) -> Union[str, Any]:
        """simple docstring"""
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)
    def test_inference_batch_single_identical( self ) -> int:
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=7E-3)
    def test_pt_np_pil_outputs_equivalent( self ) -> Optional[Any]:
        """simple docstring"""
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3)
    def test_save_load_local( self ) -> Union[str, Any]:
        """simple docstring"""
        super().test_save_load_local(expected_max_difference=3E-3)
    def test_save_load_optional_components( self ) -> List[Any]:
        """simple docstring"""
        super().test_save_load_optional_components(expected_max_difference=3E-3)
    def test_karras_schedulers( self ) -> Optional[Any]:
        """simple docstring"""
        skip_schedulers = [
            'DDIMScheduler',
            'DDPMScheduler',
            'PNDMScheduler',
            'HeunDiscreteScheduler',
            'EulerAncestralDiscreteScheduler',
            'KDPM2DiscreteScheduler',
            'KDPM2AncestralDiscreteScheduler',
            'DPMSolverSDEScheduler',
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs['num_inference_steps'] = 2
        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                # no schedulers
                continue
            scheduler_cls = getattr(diffusers , scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)
        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class __lowerCAmelCase ( unittest.TestCase ):
    def tearDown( self ) -> Optional[int]:
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_latent_upscaler_fp16( self ) -> str:
        """simple docstring"""
        generator = torch.manual_seed(33)
        pipe = StableDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' , torch_dtype=torch.float16)
        pipe.to('cuda')
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.float16)
        upscaler.to('cuda')
        prompt = 'a photo of an astronaut high resolution, unreal engine, ultra realistic'
        low_res_latents = pipe(prompt , generator=generator , output_type='latent').images
        image = upscaler(
            prompt=prompt , image=low_res_latents , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='np' , ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy')
        assert np.abs((expected_image - image).mean()) < 5E-2
    def test_latent_upscaler_fp16_image( self ) -> List[str]:
        """simple docstring"""
        generator = torch.manual_seed(33)
        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            'stabilityai/sd-x2-latent-upscaler' , torch_dtype=torch.float16)
        upscaler.to('cuda')
        prompt = 'the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png')
        upscaled_image = upscaler(
            prompt=prompt , image=image , num_inference_steps=20 , guidance_scale=0 , generator=generator , output_type='np' , ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy')
        assert np.abs((expected_image - upscaled_image).max()) < 5E-2
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args , **kwargs ) -> Union[str, Any]:
            """simple docstring"""
            pass
@is_pipeline_test
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@require_torch
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(A) , [
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}],
[{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}],
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@require_tf
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf')
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['a', 'b', 'c'])
self.assertEqual(
nested_simplify(A) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
[
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
{'score': 0.3_3_3, 'label': ANY(A)},
],
] , )
@slow
@require_torch
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
_UpperCAmelCase = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf')
# This is an image of 2 cats with remotes and no planes
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
_UpperCAmelCase = image_classifier(A , candidate_labels=['cat', 'plane', 'remote'])
self.assertEqual(
nested_simplify(A) , [
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
] , )
_UpperCAmelCase = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2)
self.assertEqual(
nested_simplify(A) , [
[
{'score': 0.5_1_1, 'label': 'remote'},
{'score': 0.4_8_5, 'label': 'cat'},
{'score': 0.0_0_4, 'label': 'plane'},
],
]
* 5 , )
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase ):
    def test_swish( self ) -> str:
        """simple docstring"""
        act = get_activation('swish')
        self.assertIsInstance(act , nn.SiLU)
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32)).item() , 0)
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)
    def test_silu( self ) -> List[str]:
        """simple docstring"""
        act = get_activation('silu')
        self.assertIsInstance(act , nn.SiLU)
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32)).item() , 0)
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)
    def test_mish( self ) -> List[str]:
        """simple docstring"""
        act = get_activation('mish')
        self.assertIsInstance(act , nn.Mish)
        self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.float32)).item() , 0)
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)
    def test_gelu( self ) -> int:
        """simple docstring"""
        act = get_activation('gelu')
        self.assertIsInstance(act , nn.GELU)
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.float32)).item() , 0)
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32)).item() , 0)
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32)).item() , 20)
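# A tiny standalone sketch (added): the string-to-module mapping exercised by
# the tests above, printed for the four supported names.
if __name__ == "__main__":
    for name in ("swish", "silu", "mish", "gelu"):
        print(name, "->", type(get_activation(name)).__name__)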
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase__ = {
"configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
"tokenization_ctrl": ["CTRLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"CTRLForSequenceClassification",
"CTRLLMHeadModel",
"CTRLModel",
"CTRLPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCTRLForSequenceClassification",
"TFCTRLLMHeadModel",
"TFCTRLModel",
"TFCTRLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ) -> List[str]:
        """simple docstring"""
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ['<s>', '<pad>', '</s>', '<unk>']
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB , save_dir / VOCAB_FILES_NAMES['spm_file'])
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id( self ) -> List[str]:
        """simple docstring"""
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def test_get_vocab( self ) -> str:
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , '<s>')
        self.assertEqual(vocab_keys[1] , '<pad>')
        self.assertEqual(vocab_keys[-1] , 'j')
        self.assertEqual(len(vocab_keys) , 10_01)
    def test_vocab_size( self ) -> Tuple:
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 10_01)
    def test_full_tokenizer( self ) -> Tuple:
        """simple docstring"""
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [2_89, 50, 14, 1_74, 3_86] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def _lowerCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
_UpperCAmelCase = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class __lowerCAmelCase ( unittest.TestCase ):
    checkpoint_name = '''valhalla/s2t_mustc_multilinguial_medium'''
    french_text = '''C\'est trop cool'''
    spanish_text = '''Esto es genial'''
@classmethod
    def setUpClass(cls) -> Union[str, Any]:
        """simple docstring"""
        cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
def _lowerCamelCase ( self : Tuple) -> Dict:
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def _lowerCamelCase ( self : Dict) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 1_00_00)
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
        self.assertIn(ES_CODE , self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 16_01, 47, 76_47, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True)
        self.assertEqual(result , expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token , result)
def _lowerCamelCase ( self : str) -> List[Any]:
"""simple docstring"""
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0] , FR_CODE)
        self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
def _lowerCamelCase ( self : Optional[Any]) -> Dict:
"""simple docstring"""
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])
        self.tokenizer.tgt_lang = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
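# Hedged sketch (illustrative, not the real implementation): the assertions
# above reflect how inputs are shaped, with the target-language id prepended
# and the eos id appended to the token ids.
def _example_build_inputs(token_ids, lang_code_id, eos_id=2):
    return [lang_code_id] + token_ids + [eos_id]

assert _example_build_inputs([16_01, 47], lang_code_id=ES_CODE) == [10, 16_01, 47, 2]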
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    '''simple docstring'''
    model_parameters = filter(lambda p: p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir , metric) -> ModelCheckpoint:
    '''simple docstring'''
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by adding"
            ' to this function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F"val_{metric}" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback(metric , patience) -> EarlyStopping:
    '''simple docstring'''
    return EarlyStopping(
        monitor=F"val_{metric}" , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
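# Hedged usage sketch (not part of the original module): one way to wire the
# two helpers above into a PyTorch Lightning Trainer; `output_dir` and the
# trainer settings are placeholders.
def _example_build_trainer(output_dir , metric="rouge2" , patience=3):
    checkpoint = get_checkpoint_callback(output_dir , metric)
    early_stop = get_early_stopping_callback(metric , patience)
    return pl.Trainer(max_epochs=1 , callbacks=[checkpoint, early_stop])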
class __lowerCAmelCase ( pl.Callback ):
    def on_batch_end(self , trainer , pl_module) -> None:
        """simple docstring"""
        lrs = {F"lr_group_{i}": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs(self , trainer: pl.Trainer , pl_module: pl.LightningModule , type_path: str , save_generations=True) -> None:
        """simple docstring"""
        logger.info(F"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / F"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file , 'a+') as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor):
                    val = val.item()
                msg = F"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'])
            generations_file.open('w+').write(content)
@rank_zero_only
    def on_train_start(self , trainer , pl_module) -> None:
        """simple docstring"""
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6})
@rank_zero_only
    def on_test_end(self , trainer: pl.Trainer , pl_module: pl.LightningModule) -> None:
        """simple docstring"""
        save_json(pl_module.metrics , pl_module.metrics_save_path)
        return self._write_logs(trainer , pl_module , 'test')
@rank_zero_only
    def on_validation_end(self , trainer: pl.Trainer , pl_module) -> None:
        """simple docstring"""
        save_json(pl_module.metrics , pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
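# Hedged sketch (illustrative, not the diffusers API): the same shim idea for
# any module boils down to re-exporting from the new location and emitting a
# warning at import time. `old_path` / `new_path` are placeholders.
def _example_warn_deprecated_import(old_path: str, new_path: str, remove_in: str) -> None:
    import warnings

    warnings.warn(
        f"Importing from {old_path} is deprecated and will be removed in "
        f"{remove_in}; import from {new_path} instead.",
        FutureWarning,
        stacklevel=3,
    )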
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp(self) -> None:
        """simple docstring"""
        super().setUp()
        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
def _lowerCamelCase ( self : Dict , **A : List[Any]) -> Optional[Any]:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A)
def _lowerCamelCase ( self : List[str] , A : Optional[int]) -> Dict:
"""simple docstring"""
        input_text = 'tester'
        output_text = 'tester'
        return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _lowerCamelCase ( self : Optional[Any]) -> int:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Optional[int]) -> Optional[int]:
"""simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}"):
                special_token = '[SPECIAL_TOKEN]'
                tokenizer.add_special_tokens({'cls_token': special_token})
                encoded_special_token = tokenizer.encode([special_token] , add_special_tokens=False)
                self.assertEqual(len(encoded_special_token) , 1)
                decoded = tokenizer.decode(encoded_special_token , skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
def _lowerCamelCase ( self : Any) -> str:
"""simple docstring"""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"{tokenizer.__class__.__name__}"):
                input_text , output_text = self.get_input_output_texts(tokenizer)
                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_a = tokenizer.encode(input_text , add_special_tokens=False)
                self.assertListEqual(ids , ids_a)
                tokens_a = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_a) , 0)
                text_a = tokenizer.decode(ids)
                self.assertIsInstance(text_a , str)
                self.assertEqual(text_a.replace(' ' , '') , output_text)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _lowerCamelCase ( self : Dict) -> Tuple:
"""simple docstring"""
pass
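# Worked example (added for illustration): in the vocab built in setUp above,
# '[GO]' and '[s]' take ids 0-1, the digits take 2-11, and 'a'-'z' take 12-37,
# so "tester" maps to [31, 16, 30, 31, 16, 29].
_example_vocab = {c: i + 12 for i, c in enumerate("abcdefghijklmnopqrstuvwxyz")}
assert [_example_vocab[c] for c in "tester"] == [31, 16, 30, 31, 16, 29]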
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    '''simple docstring'''
    if not isinstance(s , str):
        raise TypeError('The parameter s type must be str.' )
    return [s[i:] + s[:i] for i in range(len(s) )]
def bwt_transform(s: str) -> BWTTransformDict:
    '''simple docstring'''
    if not isinstance(s , str):
        raise TypeError('The parameter s type must be str.' )
    if not s:
        raise ValueError('The parameter s must not be empty.' )
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        'bwt_string': ''.join([word[-1] for word in rotations] ),
        'idx_original_string': rotations.index(s ),
    }
    return response
def reverse_bwt(bwt_string: str , idx_original_string: int) -> str:
    '''simple docstring'''
    if not isinstance(bwt_string , str):
        raise TypeError('The parameter bwt_string type must be str.' )
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.' )
    try:
        idx_original_string = int(idx_original_string )
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string must be an int or'
            ' castable to int.' )
    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.' )
    if idx_original_string >= len(bwt_string ):
        raise ValueError(
            'The parameter idx_original_string must be lower than' ' len(bwt_string).' )
    ordered_rotations = [''] * len(bwt_string )
    for _ in range(len(bwt_string ) ):
        for i in range(len(bwt_string ) ):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
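# Worked example (a sanity check added for illustration): for s = "^BANANA",
# the sorted rotations end with the characters "BNN^AAA" and the original
# string sits at index 6, so:
#     bwt_transform("^BANANA") == {"bwt_string": "BNN^AAA", "idx_original_string": 6}
#     reverse_bwt("BNN^AAA", 6) == "^BANANA"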
if __name__ == "__main__":
UpperCAmelCase__ = "Provide a string that I will generate its BWT transform: "
UpperCAmelCase__ = input(entry_msg).strip()
UpperCAmelCase__ = bwt_transform(s)
print(
f"""Burrows Wheeler transform for string '{s}' results """
f"""in '{result["bwt_string"]}'"""
)
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
print(
f"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
f"""we get original string '{original_string}'"""
)
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args() -> argparse.Namespace:
    '''simple docstring'''
    parser = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' )
parser.add_argument(
'--validation_file' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='A csv or a json file containing the validation data.' )
parser.add_argument(
'--max_length' , type=_UpperCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , )
parser.add_argument(
'--num_beams' , type=_UpperCAmelCase , default=_UpperCAmelCase , help=(
'Number of beams to use for evaluation. This argument will be '
'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'
) , )
parser.add_argument(
'--model_name_or_path' , type=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_UpperCAmelCase , )
parser.add_argument(
'--config_name' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Pretrained config name or path if not the same as model_name' , )
parser.add_argument(
'--device' , type=_UpperCAmelCase , default='cpu' , help='Device where the model will be run' , )
parser.add_argument('--output_file_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , help='Where to store the final ONNX file.' )
    args = parser.parse_args()
    return args
def A ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any]="cpu" ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = model_dict[model_name].from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_UpperCAmelCase )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def export_and_validate_model(model , tokenizer , onnx_file_path , num_beams , max_length) -> None:
    '''simple docstring'''
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model ) )
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = 'My friends are cool but they eat too many carbs.'
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors='pt' ).to(model.device )
        summary_ids = model.generate(
            inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=num_beams , max_length=max_length , early_stopping=True , decoder_start_token_id=model.config.decoder_start_token_id , )
        torch.onnx.export(
            bart_script_model , (
                inputs['input_ids'],
                inputs['attention_mask'],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ) , onnx_file_path , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={
                'input_ids': {0: 'batch', 1: 'seq'},
                'output_ids': {0: 'batch', 1: 'seq_out'},
            } , example_outputs=summary_ids , )
        logger.info('Model exported to {}'.format(onnx_file_path ) )
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path ) )
        logger.info('Deduplicated and optimized model written to {}'.format(new_onnx_file_path ) )
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path )
        ort_out = ort_sess.run(
            None , {
                'input_ids': inputs['input_ids'].cpu().numpy(),
                'attention_mask': inputs['attention_mask'].cpu().numpy(),
                'num_beams': np.array(num_beams ),
                'max_length': np.array(max_length ),
                'decoder_start_token_id': np.array(model.config.decoder_start_token_id ),
            } , )
        np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 )
        logger.info('Model outputs from torch and ONNX Runtime are similar.' )
        logger.info('Success.' )
def main() -> None:
    '''simple docstring'''
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , )
    logger.setLevel(logging.INFO )
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device )
    model , tokenizer = load_model_tokenizer(args.model_name_or_path , device )
    if model.config.decoder_start_token_id is None:
        raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' )
    model.to(device )
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = 'BART.onnx'
    logger.info('Exporting model to ONNX' )
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length )
if __name__ == "__main__":
main()
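# Example invocation (the script name and paths are placeholders, inferred
# from the argparse flags defined above):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --output_file_path bart.onnx --device cpu --max_length 5 --num_beams 4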
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_55 , do_pad=True , ) -> None:
        """simple docstring"""
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self) -> dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self , image_inputs , batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values , key=lambda item: item[0])[0]
            expected_width = max(expected_values , key=lambda item: item[1])[1]
        return expected_height, expected_width
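# Worked example (added for illustration) of the shortest-edge logic above: a
# portrait image with h=600, w=400 and shortest_edge=18 resizes to height
# int(18 * 600 / 400) = 27 and width 18, preserving the aspect ratio.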
@require_torch
@require_vision
class __lowerCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp(self) -> None:
        """simple docstring"""
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)
@property
def _lowerCamelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : List[str]) -> Optional[int]:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing , 'image_mean'))
        self.assertTrue(hasattr(image_processing , 'image_std'))
        self.assertTrue(hasattr(image_processing , 'do_normalize'))
        self.assertTrue(hasattr(image_processing , 'do_resize'))
        self.assertTrue(hasattr(image_processing , 'do_rescale'))
        self.assertTrue(hasattr(image_processing , 'do_pad'))
        self.assertTrue(hasattr(image_processing , 'size'))
def _lowerCamelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33})
        self.assertEqual(image_processor.do_pad , True)
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False)
        self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad , False)
def _lowerCamelCase ( self : List[str]) -> Dict:
"""simple docstring"""
pass
def _lowerCamelCase ( self : List[str]) -> str:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True)
        encoded_images = image_processing(image_inputs , return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt').pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self : List[str]) -> int:
"""simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors='pt').pixel_values
        expected_height , expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 3_97_69, 'annotations': target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image , annotations=target , return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66])
        self.assertEqual(encoding['pixel_values'].shape , expected_shape)
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , expected_slice , atol=1E-4))
        # verify area
        expected_area = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape , expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , expected_boxes_slice , atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([3_97_69])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , expected_orig_size))
        # verify size
        expected_size = torch.tensor([8_00, 10_66])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , expected_size))
@slow
def _lowerCamelCase ( self : Tuple) -> Optional[Any]:
"""simple docstring"""
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        image_processing = DeformableDetrImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors='pt')
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_00, 10_66])
        self.assertEqual(encoding['pixel_values'].shape , expected_shape)
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , expected_slice , atol=1E-4))
        # verify area
        expected_area = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape , expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , expected_boxes_slice , atol=1E-3))
        # verify image_id
        expected_image_id = torch.tensor([3_97_69])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , expected_class_labels))
        # verify masks
        expected_masks_sum = 82_28_73
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([4_80, 6_40])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , expected_orig_size))
        # verify size
        expected_size = torch.tensor([8_00, 10_66])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , expected_size))
def gnome_sort(lst: list) -> list:
    '''simple docstring'''
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1] , lst[i] = lst[i] , lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
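# Worked example (added for illustration): gnome_sort([3, 1, 2]) swaps 3 and 1,
# steps back, then swaps 3 and 2, yielding [1, 2, 3]; the worst case is O(n^2).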
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))