code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
def SCREAMING_SNAKE_CASE_ ( __A : int ) -> int:
    """Return the number of set bits (1s) in the binary representation of `__A`.

    Raises:
        TypeError: if `__A` is not an int.
        ValueError: if `__A` is negative.
    """
    # The type check must run first: the original compared an undefined name
    # (`a < 0`) and used `isinstance(__A, __A)`, which is never a valid check.
    if not isinstance(__A, int):
        raise TypeError("Input value must be a 'int' type")
    if __A < 0:
        raise ValueError('Input value must be a positive integer')
    # bin() yields e.g. '0b11001'; counting '1' chars counts the set bits.
    return bin(__A).count('1')
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 32 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
# NOTE(review): obfuscation collapsed three distinct path constants onto the
# single name `lowerCamelCase__`, so only the last assignment survives at
# runtime; line 51 below reads `TRANSFORMERS_PATH`, which suggests the
# originals were TRANSFORMERS_PATH / docs path / repo path — confirm and
# restore distinct names.
lowerCamelCase__ : List[Any] = '''src/transformers'''
lowerCamelCase__ : Union[str, Any] = '''docs/source/en'''
lowerCamelCase__ : Optional[int] = '''.'''
def UpperCamelCase(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`.

    The obfuscated original gave all three parameters the same name (a
    SyntaxError); the keyword names used here match the call site
    (`filename=`, `start_prompt=`, `end_prompt=`).

    Returns:
        A 4-tuple `(text, start_index, end_index, lines)` where `text` is the
        joined lines strictly between the two prompts (leading/trailing
        blank-ish lines trimmed), the indices delimit that slice, and `lines`
        is the full file content as a list of lines.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start prompt, then step past it.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    # Find the end prompt, then step back before it.
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank-ish lines (length <= 1, i.e. empty or lone newline) on both ends.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1

    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
lowerCamelCase__ : Dict = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
lowerCamelCase__ : Union[str, Any] = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
lowerCamelCase__ : Optional[int] = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
lowerCamelCase__ : Any = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ : Union[str, Any] = direct_transformers_import(TRANSFORMERS_PATH)
# NOTE(review): later code reads these as `_re_tf_models`, `_re_flax_models`,
# `_re_pt_models` and `transformers_module`; the shared obfuscated name
# `lowerCamelCase__` means only the last assignment survives — restore the
# distinct names.
def UpperCamelCase ( _lowerCAmelCase : Union[str, Any] ) -> Any:
    """Split a camel-cased identifier into its component words."""
    # A word ends at a lower->upper boundary, at an acronym boundary
    # (UPPER followed by Upper+lower), or at the end of the string.
    word_pattern = re.compile(r".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)")
    return [piece.group(0) for piece in word_pattern.finditer(_lowerCAmelCase)]
def UpperCamelCase(text, width):
    """Center `text` in a field of `width` characters.

    The obfuscated original gave both parameters the same name (a SyntaxError)
    and assigned its locals to a mangled name while reading the originals;
    both are restored here.

    The two emoji used in the support table (✅/❌) render two columns wide,
    so they are counted as length 2.
    """
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def UpperCamelCase ( ) -> List[Any]:
    """Build the markdown table summarizing, for every model, whether it has a
    slow/fast tokenizer and a PyTorch/TensorFlow/Flax implementation.

    The obfuscated original assigned every local to `_UpperCAmelCase` while
    later statements read the original names (`config_maping_names`,
    `slow_tokenizers`, ...), raising NameError; the names are restored from
    those reads. `transformers_module`, the `_re_*` regexes, `camel_case_split`
    and `_center_text` are module-level siblings (their definitions above were
    also renamed by the obfuscation — confirm and restore).
    """
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def UpperCamelCase ( _lowerCAmelCase : Any=False ) -> Dict:
    """Check that the model table in the docs `index.md` is up to date.

    Args:
        _lowerCAmelCase: overwrite flag — when True, rewrite the table in
            place instead of raising.

    Raises:
        ValueError: when the table is stale and overwrite is False.
    """
    overwrite = _lowerCAmelCase
    # NOTE(review): mirrors the module-level docs-path constant whose name was
    # lost to obfuscation (it held "docs/source/en"); the original passed that
    # constant here instead of the overwrite flag — confirm.
    docs_path = "docs/source/en"
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(docs_path, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(docs_path, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    # Parse the --fix_and_overwrite flag and run the table check.
    lowerCamelCase__ : List[str] = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    lowerCamelCase__ : List[Any] = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
    # NOTE(review): the reads `parser`, `args` and `check_model_table` refer to
    # pre-obfuscation names; the assignments above were collapsed onto
    # `lowerCamelCase__` and the function defs onto `UpperCamelCase`, so this
    # block raises NameError as written — restore the original names.
| 246 | 0 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
# `amp` is only available when NVIDIA apex is installed.
if is_apex_available():
    from apex import amp

# Native AMP (torch.cuda.amp autocast) requires torch >= 1.6.
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    A_ = True
    from torch.cuda.amp import autocast

A_ = logging.getLogger(__name__)
# NOTE(review): the AMP-availability flag and the module logger were both
# collapsed onto the name `A_` by obfuscation (only the logger survives);
# later code reads `logger` — confirm and restore distinct names.
@dataclass
class lowercase:
    """Arguments for the model/config being pre-trained.

    NOTE(review): obfuscation collapsed every field name onto `lowercase__`
    (only the last assignment survives) and optional defaults onto
    `lowerCamelCase__`. The main routine below reads `model_name_or_path`,
    `cache_dir`, `verbose_logging`, `max_gumbel_temperature`,
    `min_gumbel_temperature` and `gumbel_temperature_decay` — confirm and
    restore the original field names.
    """

    # Path/identifier of the pretrained model (required; no default).
    lowercase__ = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    # Cache directory for downloaded pretrained models.
    lowercase__ = field(
        default=lowerCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    # Whether to freeze the feature-extractor layers.
    lowercase__ = field(
        default=lowerCamelCase__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    # Verbose-logging toggle.
    lowercase__ = field(
        default=lowerCamelCase__ , metadata={"help": "Whether to log verbose messages or not."} , )
    # Gumbel-softmax temperature schedule: max, min, and per-step decay.
    lowercase__ = field(
        default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
    lowercase__ = field(
        default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
    lowercase__ = field(
        default=0.99_9995 , metadata={"help": "Decay of gumbel temperature during training."} )
def UpperCAmelCase__ (model_args : ModelArguments , training_args : TrainingArguments ):
    """Configure the root logging format and set this module's logger level.

    Level is DEBUG when `model_args.verbose_logging` is set, INFO on the main
    process, WARNING otherwise. (The obfuscated original gave both parameters
    the same name — a SyntaxError; the body reads `model_args` /
    `training_args`, so those names are restored.)
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class lowercase:
    """Arguments for the dataset used in pre-training.

    NOTE(review): field names were collapsed onto `lowercase__` by obfuscation
    (only the last assignment survives). The main routine reads
    `dataset_name`, `dataset_config_name`, `train_split_name`,
    `speech_file_column`, `overwrite_cache`, `validation_split_percentage`,
    `preprocessing_num_workers` and `max_duration_in_seconds` — confirm and
    restore the original field names.
    """

    # Hub dataset identifier (required by `load_dataset`).
    lowercase__ = field(
        default=lowerCamelCase__ , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
    lowercase__ = field(
        default=lowerCamelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    # Split names for training and validation.
    lowercase__ = field(
        default="train" , metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        } , )
    lowercase__ = field(
        default="validation" , metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        } , )
    # Column holding the audio file path.
    lowercase__ = field(
        default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
    lowercase__ = field(
        default=lowerCamelCase__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    # Percentage of train used as validation when no validation split exists.
    lowercase__ = field(
        default=1 , metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        } , )
    lowercase__ = field(
        default=lowerCamelCase__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
    # Audio longer than this many seconds is filtered out.
    lowercase__ = field(
        default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class lowercase:
    """Data collator for Wav2Vec2 pre-training: dynamically pads the input
    features into a batch and samples the masked time indices used by the
    contrastive pre-training objective.

    The obfuscated original collapsed every field name onto `lowercase__`
    (with `42` standing in for project-typed annotations) and referenced
    undefined mangled locals; names are restored from the reads in `__call__`.
    """

    # model / feature_extractor carry project types (Wav2Vec2ForPreTraining /
    # Wav2Vec2FeatureExtractor) — TODO confirm against the pre-obfuscation file.
    model: Any
    feature_extractor: Any
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__( self: int, a_: List[Dict[str, Union[List[int], torch.Tensor]]] ):
        """Pad the feature dicts in `a_` to a batch and add `mask_time_indices`."""
        batch = self.feature_extractor.pad(
            a_,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        attention_mask = None
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class lowercase( Trainer ):
    """Trainer subclass that decays the gumbel-softmax temperature after every
    update step.

    The obfuscated original inherited from an undefined `lowerCamelCase__` and
    gave `__init__` three parameters with the same name (a SyntaxError); the
    base class is restored from the Trainer-style body (`self.args`,
    `self.compute_loss`, ...) and the keyword names from the call site
    (`max_gumbel_temp=`, `min_gumbel_temp=`, `gumbel_temp_decay=`).
    """

    def __init__( self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs ):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    # NOTE(review): this looks like it is meant to override
    # `Trainer.training_step`; with the obfuscated name the base Trainer will
    # never call it — confirm and restore the original method name.
    def UpperCamelCase_ ( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]] ):
        """Run one training step on `inputs`, scaling the loss for AMP / apex /
        deepspeed / gradient accumulation, then decay the gumbel temperature.

        Returns the detached loss tensor.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['mask_time_indices']).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        return loss.detach()
def UpperCAmelCase__ ():
    """Entry point: parse arguments, load and preprocess the dataset, build the
    Wav2Vec2 model and run pre-training.

    Local names are restored from the reads in the obfuscated body (which
    assigned everything to `_snake_case` while reading `datasets`, `batch`,
    `data`, ...). `configure_logger`, `DataCollatorForWavaVecaPretraining`
    and `WavaVecaPreTrainer` are pre-obfuscation names of siblings defined
    above — confirm and restore those definitions' names.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain:
        # carve the validation split out of the head of the train split
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
    # The entry point was renamed by obfuscation, so `main()` no longer
    # exists; call the (last-defined) `UpperCAmelCase__`, i.e. the main routine.
    UpperCAmelCase__()
| 361 |
"""simple docstring"""
def UpperCAmelCase__ (snake_case__ : int = 10_00 ):
    """Project Euler 57: among the first `snake_case__` expansions of the
    continued fraction for sqrt(2), count those whose numerator has more
    digits than the denominator.

    The obfuscated original assigned to `_snake_case` while reading
    `prev_numerator` / `prev_denominator` / `n`, raising NameError; the locals
    are restored from those reads.

    >>> UpperCAmelCase__(8)
    1
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, snake_case__ + 1):
        # next convergent: (p + 2q) / (p + q)
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
    # `solution` was renamed to `UpperCAmelCase__` by obfuscation; call that
    # name so the script actually runs instead of raising NameError.
    print(f"{UpperCAmelCase__() = }")
| 132 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase_ ( ProcessorMixin ):
    """Processor wrapping a BLIP image processor and a BERT tokenizer.

    Reconstructed from the obfuscated original, which inherited from an
    undefined `A__`, collapsed the three class attributes onto `_snake_case`,
    named every method `A__` (so they shadowed each other) and gave methods
    duplicate parameter names (a SyntaxError). Attribute and method names are
    restored to the `ProcessorMixin` contract the body relies on.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self, image_processor, tokenizer ):
        # BLIP's text model does not use token type ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Prepare text and/or images for the model.

        Returns the tokenizer output when only text is given, otherwise the
        image-processor output (with the tokenizer output merged in when both
        are given). Raises ValueError when neither is given.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode( self, *args, **kwargs ):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode( self, *args, **kwargs ):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names( self ):
        """Combined, de-duplicated input names of tokenizer and image processor."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 301 |
"""simple docstring"""
from __future__ import annotations
def lowercase (n , k ):
    """Return all k-element combinations of {1, ..., n} in lexicographic order.

    The obfuscated original gave both parameters the same name (a SyntaxError);
    `n`/`k` are restored from the demo call at the bottom of the file.
    Delegates to the recursive `create_all_state` helper, whose definition
    below was also renamed by obfuscation — confirm the helper's name.
    """
    result = []
    create_all_state(1, n, k, [], result)
    return result
def lowercase (increment , total_number , level , current_list , total_list ):
    """Recursive helper: extend `current_list` with candidates starting at
    `increment` until `level` reaches 0, appending each completed combination
    (as a copy) to `total_list`, which is mutated in place.

    The obfuscated original gave all five parameters the same name (a
    SyntaxError); names are restored from the reads in the body.
    """
    if level == 0:
        total_list.append(current_list[:])
        return

    # `total_number - level + 2` leaves enough room for the remaining picks.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        # Self-recursion (the pre-obfuscation name was `create_all_state`).
        lowercase(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def lowercase (total_list ):
    """Print each combination in `total_list` on its own line, space-separated.

    Fixes the obfuscated original, which looped over the list but unpacked and
    printed the whole parameter on every iteration instead of the loop item.
    """
    for combination in total_list:
        print(*combination)
if __name__ == "__main__":
    # Demo: print all 2-combinations of {1, 2, 3, 4}.
    SCREAMING_SNAKE_CASE_ = 4
    SCREAMING_SNAKE_CASE_ = 2
    SCREAMING_SNAKE_CASE_ = generate_all_combinations(n, k)
    print_all_state(total_list)
    # NOTE(review): `generate_all_combinations`, `n`, `k`, `print_all_state`
    # and `total_list` are pre-obfuscation names; the defs above are all named
    # `lowercase` and the constants `SCREAMING_SNAKE_CASE_`, so this block
    # raises NameError as written — restore the original names.
| 301 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( a ):
    """Stable-diffusion text-to-image pipeline that can seed generation from a
    reference latent tensor so outputs at different sizes stay visually close.

    NOTE(review): this class is obfuscation-damaged — the base class `a` is
    undefined (presumably `DiffusionPipeline`, imported above), both slicing
    methods share the name `__snake_case`, parameters share `_UpperCamelCase`
    (duplicate parameter names are a SyntaxError), and every assignment target
    was collapsed onto `SCREAMING_SNAKE_CASE` while later statements still
    read the original local names (`batch_size`, `text_embeddings`,
    `latents`, ...). Restore the original names before use.
    """

    def __init__( self : List[Any] , _UpperCamelCase : AutoencoderKL , _UpperCamelCase : CLIPTextModel , _UpperCamelCase : CLIPTokenizer , _UpperCamelCase : UNetaDConditionModel , _UpperCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _UpperCamelCase : StableDiffusionSafetyChecker , _UpperCamelCase : CLIPImageProcessor , ) -> List[Any]:
        """Register the sub-models (vae, text encoder, tokenizer, unet,
        scheduler, safety checker, feature extractor) with the pipeline.

        NOTE(review): all seven parameters share one obfuscated name.
        """
        super().__init__()
        self.register_modules(
            vae=_UpperCamelCase , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase , unet=_UpperCamelCase , scheduler=_UpperCamelCase , safety_checker=_UpperCamelCase , feature_extractor=_UpperCamelCase , )

    def __snake_case( self : Dict , _UpperCamelCase : Optional[Union[str, int]] = "auto" ) -> int:
        """Enable sliced attention computation (lower memory use).

        NOTE(review): the body reads `slice_size`, which obfuscation renamed —
        presumably this parameter; confirm.
        """
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            SCREAMING_SNAKE_CASE = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(_UpperCamelCase )

    def __snake_case( self : Union[str, Any] ) -> Union[str, Any]:
        """Disable attention slicing.

        NOTE(review): `enable_attention_slicing` and the mangled argument —
        presumably `None` — are pre-obfuscation names; confirm.
        """
        self.enable_attention_slicing(_UpperCamelCase )

    @torch.no_grad()
    def __call__( self : Tuple , _UpperCamelCase : Union[str, List[str]] , _UpperCamelCase : int = 512 , _UpperCamelCase : int = 512 , _UpperCamelCase : int = 50 , _UpperCamelCase : float = 7.5 , _UpperCamelCase : Optional[Union[str, List[str]]] = None , _UpperCamelCase : Optional[int] = 1 , _UpperCamelCase : float = 0.0 , _UpperCamelCase : Optional[torch.Generator] = None , _UpperCamelCase : Optional[torch.FloatTensor] = None , _UpperCamelCase : Optional[str] = "pil" , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _UpperCamelCase : int = 1 , _UpperCamelCase : Optional[torch.FloatTensor] = None , **_UpperCamelCase : Optional[Any] , ) -> Union[str, Any]:
        """Generate images from a prompt, optionally re-using a reference
        latent so outputs at different resolutions resemble each other.

        NOTE(review): parameter names are mangled; by position and defaults
        they appear to be prompt, height, width, num_inference_steps,
        guidance_scale, negative_prompt, num_images_per_prompt, eta,
        generator, latents, output_type, return_dict, callback,
        callback_steps and a reference latent — confirm against the
        pre-obfuscation source.
        """
        # --- validate inputs ---
        if isinstance(_UpperCamelCase , _UpperCamelCase ):
            SCREAMING_SNAKE_CASE = 1
        elif isinstance(_UpperCamelCase , _UpperCamelCase ):
            SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
        else:
            raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(_UpperCamelCase )}" )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(_UpperCamelCase , _UpperCamelCase ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                F" {type(_UpperCamelCase )}." )

        # get prompt text embeddings
        SCREAMING_SNAKE_CASE = self.tokenizer(
            _UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
        SCREAMING_SNAKE_CASE = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
            SCREAMING_SNAKE_CASE = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            SCREAMING_SNAKE_CASE = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = text_embeddings.shape
        SCREAMING_SNAKE_CASE = text_embeddings.repeat(1 , _UpperCamelCase , 1 )
        SCREAMING_SNAKE_CASE = text_embeddings.view(bs_embed * num_images_per_prompt , _UpperCamelCase , -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        SCREAMING_SNAKE_CASE = guidance_scale > 1.0

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            SCREAMING_SNAKE_CASE = 42
            if negative_prompt is None:
                SCREAMING_SNAKE_CASE = [""]
            elif type(_UpperCamelCase ) is not type(_UpperCamelCase ):
                raise TypeError(
                    F"`negative_prompt` should be the same type to `prompt`, but got {type(_UpperCamelCase )} !="
                    F" {type(_UpperCamelCase )}." )
            elif isinstance(_UpperCamelCase , _UpperCamelCase ):
                SCREAMING_SNAKE_CASE = [negative_prompt]
            elif batch_size != len(_UpperCamelCase ):
                raise ValueError(
                    F"`negative_prompt`: {negative_prompt} has batch size {len(_UpperCamelCase )}, but `prompt`:"
                    F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`." )
            else:
                SCREAMING_SNAKE_CASE = negative_prompt

            SCREAMING_SNAKE_CASE = text_input_ids.shape[-1]
            SCREAMING_SNAKE_CASE = self.tokenizer(
                _UpperCamelCase , padding="max_length" , max_length=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors="pt" , )
            SCREAMING_SNAKE_CASE = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            SCREAMING_SNAKE_CASE = uncond_embeddings.shape[1]
            SCREAMING_SNAKE_CASE = uncond_embeddings.repeat(_UpperCamelCase , _UpperCamelCase , 1 )
            SCREAMING_SNAKE_CASE = uncond_embeddings.view(batch_size * num_images_per_prompt , _UpperCamelCase , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            SCREAMING_SNAKE_CASE = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        SCREAMING_SNAKE_CASE = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        SCREAMING_SNAKE_CASE = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        SCREAMING_SNAKE_CASE = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                SCREAMING_SNAKE_CASE = torch.randn(
                    _UpperCamelCase , generator=_UpperCamelCase , device="cpu" , dtype=_UpperCamelCase ).to(self.device )
                SCREAMING_SNAKE_CASE = torch.randn(_UpperCamelCase , generator=_UpperCamelCase , device="cpu" , dtype=_UpperCamelCase ).to(
                    self.device )
            else:
                SCREAMING_SNAKE_CASE = torch.randn(
                    _UpperCamelCase , generator=_UpperCamelCase , device=self.device , dtype=_UpperCamelCase )
                SCREAMING_SNAKE_CASE = torch.randn(_UpperCamelCase , generator=_UpperCamelCase , device=self.device , dtype=_UpperCamelCase )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
            SCREAMING_SNAKE_CASE = latents_reference.to(self.device )
            SCREAMING_SNAKE_CASE = latents.to(self.device )

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        SCREAMING_SNAKE_CASE = (latents_shape[3] - latents_shape_reference[3]) // 2
        SCREAMING_SNAKE_CASE = (latents_shape[2] - latents_shape_reference[2]) // 2
        SCREAMING_SNAKE_CASE = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        SCREAMING_SNAKE_CASE = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        SCREAMING_SNAKE_CASE = 0 if dx < 0 else dx
        SCREAMING_SNAKE_CASE = 0 if dy < 0 else dy
        SCREAMING_SNAKE_CASE = max(-dx , 0 )
        SCREAMING_SNAKE_CASE = max(-dy , 0 )
        # import pdb
        # pdb.set_trace()
        SCREAMING_SNAKE_CASE = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(_UpperCamelCase )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        SCREAMING_SNAKE_CASE = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        SCREAMING_SNAKE_CASE = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        SCREAMING_SNAKE_CASE = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        SCREAMING_SNAKE_CASE = {}
        if accepts_eta:
            SCREAMING_SNAKE_CASE = eta

        for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
            # expand the latents if we are doing classifier free guidance
            SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )

            # predict the noise residual
            SCREAMING_SNAKE_CASE = self.unet(_UpperCamelCase , _UpperCamelCase , encoder_hidden_states=_UpperCamelCase ).sample

            # perform guidance
            if do_classifier_free_guidance:
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
                SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            SCREAMING_SNAKE_CASE = self.scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )

        # decode the latents back to image space (1/0.18215 undoes VAE scaling)
        SCREAMING_SNAKE_CASE = 1 / 0.1_8_2_1_5 * latents
        SCREAMING_SNAKE_CASE = self.vae.decode(_UpperCamelCase ).sample
        SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if self.safety_checker is not None:
            SCREAMING_SNAKE_CASE = self.feature_extractor(self.numpy_to_pil(_UpperCamelCase ) , return_tensors="pt" ).to(
                self.device )
            SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.safety_checker(
                images=_UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            SCREAMING_SNAKE_CASE = None

        if output_type == "pil":
            SCREAMING_SNAKE_CASE = self.numpy_to_pil(_UpperCamelCase )

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=_UpperCamelCase , nsfw_content_detected=_UpperCamelCase )
| 350 | from __future__ import annotations
from collections.abc import Iterator
class lowercase:
    """A binary-tree node: an integer payload plus left/right child links."""

    def __init__(self, value: int) -> None:
        """Store *value* and start with no children.

        NOTE(review): the mangled original bound throwaway locals and read an
        undefined name; restored the ``self.value``/``self.left``/``self.right``
        attributes that the tree-sum class below reads.
        """
        self.value = value
        self.left = None   # left child node, or None
        self.right = None  # right child node, or None
class lowercase:
    """Sums every node ``value`` of a binary tree via depth-first traversal.

    NOTE(review): restored from a mangled definition in which ``self.tree`` was
    never bound, parameters were unreadable, and the recursive method name had
    been destroyed while ``__iter__``/the recursion still called
    ``self.depth_first_search``.
    """

    def __init__(self, tree) -> None:
        # Root node of the tree; read by __iter__.
        self.tree = tree

    def depth_first_search(self, node) -> int:
        """Return the sum of ``value`` over the subtree rooted at *node* (0 for None)."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self):
        """Yield the total sum of the whole tree exactly once."""
        yield self.depth_first_search(self.tree)
# Run this module's doctests when executed directly as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 206 | 0 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __lowerCamelCase ( A__ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase = torch.exp(A__ )
UpperCamelCase = torch.sum(A__ , dim=1 ) # sum of exp(x_i)
UpperCamelCase = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(A__ ) - B / A
# (review) DeeBERT-style encoder: a stack of BertLayer blocks, each paired with a
# "highway" early-exit head built from BertHighway.  This definition has been run
# through an automated identifier mangler and is NOT runnable as written:
#   * all three methods are named ``A`` — at class-creation time each later
#     ``def A`` shadows the earlier one, so only the last (the forward pass)
#     survives on the class;
#   * ``__init__`` reads an undefined name ``config`` (its parameter is
#     ``UpperCamelCase__``) and binds a throwaway local instead of the ``self.*``
#     attributes (``self.layer``, ``self.highway``, ``self.early_exit_entropy``,
#     ``self.output_attentions``, ``self.output_hidden_states``) the forward
#     pass reads;
#   * the forward ``def`` repeats the parameter name ``UpperCamelCase__`` five
#     times, which is a SyntaxError in Python.
# The intended local names can still be read off the right-hand sides below
# (``all_hidden_states``, ``layer_outputs``, ``highway_exit``, ...); restore the
# original identifiers before attempting to use this class.
class SCREAMING_SNAKE_CASE ( nn.Module ):
    """simple docstring"""
    def __init__( self : Any , UpperCamelCase__ : Any ):
        """simple docstring"""
        super().__init__()
        # (review) each assignment below was presumably ``self.<attr> = ...``.
        UpperCamelCase = config.output_attentions
        UpperCamelCase = config.output_hidden_states
        UpperCamelCase = nn.ModuleList([BertLayer(UpperCamelCase__ ) for _ in range(config.num_hidden_layers )] )
        UpperCamelCase = nn.ModuleList([BertHighway(UpperCamelCase__ ) for _ in range(config.num_hidden_layers )] )
        UpperCamelCase = [-1 for _ in range(config.num_hidden_layers )]
    # (review) setter for the per-layer early-exit entropy thresholds: a scalar
    # broadcasts to every layer, otherwise a per-layer sequence is expected.
    def A ( self : Dict , UpperCamelCase__ : str ):
        """simple docstring"""
        if (type(UpperCamelCase__ ) is float) or (type(UpperCamelCase__ ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                UpperCamelCase = x
        else:
            UpperCamelCase = x
    # (review) copies the main pooler's weights into every highway head's pooler.
    def A ( self : Union[str, Any] , UpperCamelCase__ : str ):
        """simple docstring"""
        UpperCamelCase = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    # (review) forward pass: runs the layer stack; at inference time each
    # highway head's entropy is compared to its threshold, and a sufficiently
    # confident head aborts the pass by raising HighwayException.
    def A ( self : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : List[Any]=None , ):
        """simple docstring"""
        UpperCamelCase = ()
        UpperCamelCase = ()
        UpperCamelCase = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                UpperCamelCase = all_hidden_states + (hidden_states,)
            UpperCamelCase = layer_module(
                UpperCamelCase__ , UpperCamelCase__ , head_mask[i] , UpperCamelCase__ , UpperCamelCase__ )
            UpperCamelCase = layer_outputs[0]
            if self.output_attentions:
                UpperCamelCase = all_attentions + (layer_outputs[1],)
            UpperCamelCase = (hidden_states,)
            if self.output_hidden_states:
                UpperCamelCase = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                UpperCamelCase = current_outputs + (all_attentions,)
            UpperCamelCase = self.highway[i](UpperCamelCase__ )
            # logits, pooled_output
            if not self.training:
                UpperCamelCase = highway_exit[0]
                UpperCamelCase = entropy(UpperCamelCase__ )
                UpperCamelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
                UpperCamelCase = all_highway_exits + (highway_exit,)
                # (review) early exit is signalled via an exception so the
                # classifier wrapper can catch it and recover the partial outputs.
                if highway_entropy < self.early_exit_entropy[i]:
                    UpperCamelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(UpperCamelCase__ , i + 1 )
            else:
                UpperCamelCase = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            UpperCamelCase = all_hidden_states + (hidden_states,)
        UpperCamelCase = (hidden_states,)
        if self.output_hidden_states:
            UpperCamelCase = outputs + (all_hidden_states,)
        if self.output_attentions:
            UpperCamelCase = outputs + (all_attentions,)
        UpperCamelCase = outputs + (all_highway_exits,)
        return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
# (review) DeeBERT backbone model (embeddings + early-exit encoder + pooler).
# Mangling artifacts, not runnable as written:
#   * the decorator argument and the base class are the undefined name ``_a``
#     (presumably the docstring constant and ``BertPreTrainedModel``);
#   * five methods share the name ``A`` — only the last ``def`` (the forward
#     pass) survives on the class;
#   * ``__init__`` binds throwaway locals instead of ``self.config``,
#     ``self.embeddings``, ``self.encoder``, ``self.pooler`` (which the other
#     methods read), and references the undefined name ``DeeBertEncoder``;
#   * the forward ``def`` repeats the parameter name ``UpperCamelCase__`` eight
#     times — a SyntaxError — while its body reads the intended names
#     (``input_ids``, ``attention_mask``, ``token_type_ids``, ...).
@add_start_docstrings(
    """The Bert Model transformer with early exiting (DeeBERT). """ , _a , )
class SCREAMING_SNAKE_CASE ( _a ):
    """simple docstring"""
    def __init__( self : Any , UpperCamelCase__ : Any ):
        """simple docstring"""
        super().__init__(UpperCamelCase__ )
        UpperCamelCase = config
        UpperCamelCase = BertEmbeddings(UpperCamelCase__ )
        UpperCamelCase = DeeBertEncoder(UpperCamelCase__ )
        UpperCamelCase = BertPooler(UpperCamelCase__ )
        self.init_weights()
    # (review) propagates the main pooler's weights into every highway head.
    def A ( self : int ):
        """simple docstring"""
        self.encoder.init_highway_pooler(self.pooler )
    # (review) input-embedding getter.
    def A ( self : List[Any] ):
        """simple docstring"""
        return self.embeddings.word_embeddings
    # (review) input-embedding setter (target attribute lost to mangling).
    def A ( self : List[str] , UpperCamelCase__ : Optional[Any] ):
        """simple docstring"""
        UpperCamelCase = value
    # (review) attention-head pruning, keyed by layer index.
    def A ( self : Any , UpperCamelCase__ : List[str] ):
        """simple docstring"""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(UpperCamelCase__ )
    @add_start_docstrings_to_model_forward(UpperCamelCase__ )
    def A ( self : Optional[Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : Optional[int]=None , ):
        """simple docstring"""
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
        elif input_ids is not None:
            UpperCamelCase = input_ids.size()
        elif inputs_embeds is not None:
            UpperCamelCase = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds' )
        UpperCamelCase = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            UpperCamelCase = torch.ones(UpperCamelCase__ , device=UpperCamelCase__ )
        if encoder_attention_mask is None:
            UpperCamelCase = torch.ones(UpperCamelCase__ , device=UpperCamelCase__ )
        if token_type_ids is None:
            UpperCamelCase = torch.zeros(UpperCamelCase__ , dtype=torch.long , device=UpperCamelCase__ )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        UpperCamelCase = self.get_extended_attention_mask(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            UpperCamelCase = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            UpperCamelCase = encoder_attention_mask[:, None, None, :]
        UpperCamelCase = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype ) # fp16 compatibility
        UpperCamelCase = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        UpperCamelCase = self.get_head_mask(UpperCamelCase__ , self.config.num_hidden_layers )
        UpperCamelCase = self.embeddings(
            input_ids=UpperCamelCase__ , position_ids=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ )
        UpperCamelCase = self.encoder(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )
        UpperCamelCase = encoder_outputs[0]
        UpperCamelCase = self.pooler(UpperCamelCase__ )
        UpperCamelCase = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ] # add hidden_states and attentions if they are here
        return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    """Raised by a highway (early-exit) head to abort the encoder forward pass.

    NOTE(review): restored from a mangled definition whose ``__init__`` had two
    parameters with the same name (a SyntaxError) and bound throwaway locals.
    Other code in this file raises ``HighwayException(message, layer)`` and
    catches it, reading ``e.message`` and ``e.exit_layer``, so the class is
    named accordingly and derives from ``Exception``.

    Attributes:
        message: the partial outputs tuple carried out of the encoder.
        exit_layer: 1-based index of the layer that exited early.
    """

    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


# Preserve the original (mangled) module binding for backward compatibility.
SCREAMING_SNAKE_CASE = HighwayException
class BertHighway(nn.Module):
    """A per-layer early-exit ("highway") head: BertPooler + dropout + linear classifier.

    NOTE(review): restored from a mangled definition in which ``__init__`` read
    an undefined ``config`` and bound locals instead of the ``self.pooler`` /
    ``self.dropout`` / ``self.classifier`` attributes the forward pass reads,
    and the forward method had lost its ``forward`` name.  The encoder in this
    file builds these heads as ``BertHighway(config)`` and calls them with the
    tuple of encoder outputs.
    """

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        """Return ``(logits, pooled_output)`` for one layer's output tuple."""
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output
        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output


# Preserve the original (mangled) module binding for backward compatibility.
SCREAMING_SNAKE_CASE = BertHighway
# (review) DeeBERT sequence classifier with multi-layer (highway) training.
# Mangling artifacts, not runnable as written:
#   * decorator argument / base class are the undefined name ``_a``;
#   * ``__init__`` binds throwaway locals instead of ``self.num_labels``,
#     ``self.num_layers``, ``self.bert``, ``self.dropout``, ``self.classifier``
#     (all read by the forward pass) and references undefined ``DeeBertModel``;
#   * the forward ``def`` repeats the parameter name ``UpperCamelCase__`` nine
#     times — a SyntaxError — while the body reads the intended names
#     (``labels``, ``train_highway``, ``output_layer``, ...).
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """ , _a , )
class SCREAMING_SNAKE_CASE ( _a ):
    """simple docstring"""
    def __init__( self : Optional[int] , UpperCamelCase__ : Optional[Any] ):
        """simple docstring"""
        super().__init__(UpperCamelCase__ )
        UpperCamelCase = config.num_labels
        UpperCamelCase = config.num_hidden_layers
        UpperCamelCase = DeeBertModel(UpperCamelCase__ )
        UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
        UpperCamelCase = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()
    # (review) forward pass: a highway head that exits early raises
    # HighwayException, which is caught here to recover the partial outputs and
    # the exiting layer index.  With labels, a loss is computed for the final
    # classifier and for every highway head (optionally training the highways).
    @add_start_docstrings_to_model_forward(UpperCamelCase__ )
    def A ( self : str , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : str=-1 , UpperCamelCase__ : Union[str, Any]=False , ):
        """simple docstring"""
        UpperCamelCase = self.num_layers
        try:
            UpperCamelCase = self.bert(
                UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , position_ids=UpperCamelCase__ , head_mask=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            UpperCamelCase = outputs[1]
            UpperCamelCase = self.dropout(UpperCamelCase__ )
            UpperCamelCase = self.classifier(UpperCamelCase__ )
            UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
        except HighwayException as e:
            UpperCamelCase = e.message
            UpperCamelCase = e.exit_layer
            UpperCamelCase = outputs[0]
        if not self.training:
            UpperCamelCase = entropy(UpperCamelCase__ )
            UpperCamelCase = []
            UpperCamelCase = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                UpperCamelCase = MSELoss()
                UpperCamelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                UpperCamelCase = CrossEntropyLoss()
                UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            UpperCamelCase = []
            for highway_exit in outputs[-1]:
                UpperCamelCase = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(UpperCamelCase__ )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    UpperCamelCase = MSELoss()
                    UpperCamelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    UpperCamelCase = CrossEntropyLoss()
                    UpperCamelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(UpperCamelCase__ )
            if train_highway:
                UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                UpperCamelCase = (loss,) + outputs
        if not self.training:
            UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                UpperCamelCase = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                ) # use the highway of the last layer
        return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 28 |
"""simple docstring"""
from torch import nn
def snake_case(act_fn):
    """Map an activation-function name to a fresh ``torch.nn`` activation module.

    NOTE(review): the mangled original named the parameter ``A__`` while the
    body read ``act_fn``; the parameter name is restored to match the body.

    Args:
        act_fn: one of "swish", "silu", "mish", "gelu".

    Returns:
        The corresponding activation module (``nn.SiLU``/``nn.Mish``/``nn.GELU``).

    Raises:
        ValueError: for any unrecognized activation name.
    """
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F"""Unsupported activation function: {act_fn}""")
| 268 | 0 |
"""simple docstring"""
from __future__ import annotations
import requests
lowerCamelCase__ = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def lowercase__(subreddit, limit=1, age="new", wanted_data=None) -> dict:
    """Fetch posts from a subreddit's public JSON listing.

    NOTE(review): restored from a mangled signature in which all four
    parameters shared one name (a SyntaxError); the body already used the
    names below.  ``lowerCamelCase__`` is the module-level set of valid field
    names defined just above.

    Args:
        subreddit: subreddit name, e.g. "learnpython".
        limit: number of posts to request.
        age: listing to read — "new", "top", "hot", ...
        wanted_data: field names to keep per post; must be a subset of the
            valid-terms set.  Empty/None returns the raw post objects.

    Returns:
        Dict keyed by 0..limit-1 of raw posts, or of per-post dicts limited to
        the requested fields.

    Raises:
        ValueError: if any requested field is not a valid field name.
        requests.HTTPError: on HTTP 429 (rate limited).
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - lowerCamelCase__)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        # Keep only the requested fields of each post.
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time.
    # NOTE(review): the original called the undefined name ``get_subreddit_data``;
    # the fetch function in this file is bound as ``lowercase__``.
    print(lowercase__("learnpython", wanted_data=["title", "url", "selftext"]))
| 310 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowerCamelCase__ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
# (review) Test suite for the "text-question-answering" tool.  Mangling
# artifacts, not runnable as written:
#   * the second base class is ``_UpperCamelCase``, which at this point in the
#     file is the module-level passage string (presumably it was
#     ``ToolTesterMixin``, which is imported above);
#   * all five methods share one name, so only the last ``def`` survives on the
#     class (the first was presumably ``setUp``);
#   * ``setUp`` binds locals instead of ``self.tool`` / ``self.remote_tool``,
#     which every test method reads;
#   * every ``__a`` reference is name-mangled inside the class body and hence
#     undefined (in the tests it presumably stood for the passage constant, and
#     for ``True`` in ``remote=__a``).
class __SCREAMING_SNAKE_CASE ( unittest.TestCase , _UpperCamelCase ):
    '''simple docstring'''
    def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
        _UpperCamelCase : str = load_tool("text-question-answering" )
        self.tool.setup()
        _UpperCamelCase : Union[str, Any] = load_tool("text-question-answering" , remote=__a )
    # (review) positional-argument call against the local tool.
    def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
        _UpperCamelCase : Dict = self.tool(__a , "What did Hugging Face do in April 2021?" )
        self.assertEqual(__a , "launched the BigScience Research Workshop" )
    # (review) positional-argument call against the remote tool.
    def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
        _UpperCamelCase : List[str] = self.remote_tool(__a , "What did Hugging Face do in April 2021?" )
        self.assertEqual(__a , "launched the BigScience Research Workshop" )
    # (review) keyword-argument call against the local tool.
    def __SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
        _UpperCamelCase : Dict = self.tool(text=__a , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(__a , "launched the BigScience Research Workshop" )
    # (review) keyword-argument call against the remote tool.
    def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
        _UpperCamelCase : List[Any] = self.remote_tool(text=__a , question="What did Hugging Face do in April 2021?" )
        self.assertEqual(__a , "launched the BigScience Research Workshop" )
| 310 | 1 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_ta import TaTokenizer
else:
    # SentencePiece is an optional dependency; without it there is no slow
    # tokenizer counterpart.
    # NOTE(review): restored — the mangled original bound ``_A`` here, leaving
    # ``TaTokenizer`` undefined for the ``slow_tokenizer_class`` reference in
    # the fast tokenizer class below.
    TaTokenizer = None
_A : Any = logging.get_logger(__name__)
_A : Any = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_A : Any = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
_A : List[str] = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
# (review) T5 fast tokenizer (``PreTrainedTokenizerFast`` subclass).  Mangling
# artifacts, not runnable as written:
#   * the base class is the undefined name ``a_``;
#   * all six class attributes are assigned to the same (class-mangled) name
#     ``__lowerCAmelCase``, so each assignment clobbers the previous one —
#     the intended attributes were presumably ``vocab_files_names``,
#     ``pretrained_vocab_files_map``, ``max_model_input_sizes``,
#     ``model_input_names``, ``slow_tokenizer_class``, ``prefix_tokens``;
#   * ``__init__`` repeats the parameter name ``_a`` (a SyntaxError) while its
#     body reads the intended names (``extra_ids``,
#     ``additional_special_tokens``, ``vocab_file``, ...), and binds throwaway
#     locals instead of ``self.*`` attributes;
#   * the remaining methods are all named ``__magic_name__`` (only the last
#     survives) and several also repeat ``_a`` in their signatures.
class a__ ( a_ ):
    __lowerCAmelCase = VOCAB_FILES_NAMES
    __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowerCAmelCase = ["""input_ids""", """attention_mask"""]
    __lowerCAmelCase = TaTokenizer
    __lowerCAmelCase = []
    def __init__( self , _a=None , _a=None , _a="</s>" , _a="<unk>" , _a="<pad>" , _a=100 , _a=None , **_a , ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            lowercase : List[str] = [f"""<extra_id_{i}>""" for i in range(_a )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            lowercase : List[Any] = len(set(filter(lambda _a : bool("extra_id_" in str(_a ) ) , _a ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens" )
        super().__init__(
            _a , tokenizer_file=_a , eos_token=_a , unk_token=_a , pad_token=_a , extra_ids=_a , additional_special_tokens=_a , **_a , )
        lowercase : Any = vocab_file
        lowercase : int = False if not self.vocab_file else True
        lowercase : List[str] = extra_ids
    # (review) legacy max-model-length correction scheduled for removal in
    # Transformers v5 (see the module-level TODO above).
    @staticmethod
    def __magic_name__ ( _a , _a , _a ):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            lowercase : Optional[int] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f""" {pretrained_model_name_or_path} automatically truncating your input to"""
                    f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
                    f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value." , _a , )
        return max_model_length
    # (review) saves the slow (SentencePiece) vocabulary by copying the
    # original vocab file next to the fast tokenizer files.
    def __magic_name__ ( self , _a , _a = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(_a ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        lowercase : Optional[Any] = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
            copyfile(self.vocab_file , _a )
            logger.info(f"""Copy vocab file to {out_vocab_file}""" )
        return (out_vocab_file,)
    # (review) builds model inputs by appending the EOS token id to each
    # sequence (single sequence or pair).
    def __magic_name__ ( self , _a , _a = None ):
        lowercase : Dict = token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a
        else:
            lowercase : int = token_ids_a + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_a
    # (review) T5 does not use token-type ids: returns a zero list of the
    # combined sequence length.
    def __magic_name__ ( self , _a , _a = None ):
        lowercase : Tuple = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    # (review) the sentinel tokens, i.e. additional special tokens matching
    # ``<extra_id_N>``.
    def __magic_name__ ( self ):
        return list(
            set(filter(lambda _a : bool(re.search(R"<extra_id_\d+>" , _a ) ) is not None , self.additional_special_tokens ) ) )
    # (review) the sentinel tokens converted to ids.
    def __magic_name__ ( self ):
        return [self.convert_tokens_to_ids(_a ) for token in self.get_sentinel_tokens()]
| 202 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf YAML config from *config_path*.

    NOTE(review): restored from a mangled signature whose two parameters
    shared one name (a SyntaxError); the loader below calls ``load_config``.

    Args:
        config_path: path to the YAML file.
        display: if True, pretty-print the resolved config to stdout.

    Returns:
        The loaded OmegaConf config object.
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


# Preserve the original (mangled) binding for backward compatibility.
__magic_name__ = load_config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a VQModel from a YAML config and load a checkpoint onto *device*.

    NOTE(review): restored from a mangled signature whose three parameters
    shared one name (a SyntaxError); defaults and body follow the original
    right-hand sides.

    Args:
        device: torch device (or anything ``model.to`` / ``map_location`` accepts).
        conf_path: YAML config path; defaults to the bundled vqgan-only config.
        ckpt_path: checkpoint path; ``.ckpt`` files are unwrapped via "state_dict".

    Returns:
        The VQModel with weights loaded, moved to *device*.
    """
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        # Lightning checkpoints nest the weights under "state_dict".
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    # Free the (possibly large) state dict before returning.
    del sd
    return model


# Preserve the original (mangled) binding for backward compatibility.
__magic_name__ = load_vqgan
def reconstruct_with_vqgan(x, model):
    """Encode *x* with the VQGAN *model*, report the latent shape, and decode it back.

    NOTE(review): restored from a mangled three-way unpack; only the latent
    ``z`` is used, so the remaining encode outputs are discarded.

    Args:
        x: input batch to reconstruct.
        model: a VQGAN-style model exposing ``encode`` (returning a 3-tuple
            whose first element is the latent) and ``decode``.

    Returns:
        The decoded reconstruction.
    """
    z, _, _ = model.encode(x)
    print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""")
    xrec = model.decode(z)
    return xrec


# Preserve the original (mangled) binding for backward compatibility.
__magic_name__ = reconstruct_with_vqgan
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path such as ``"pkg.mod.Class"`` to the named object.

    NOTE(review): restored from a mangled signature whose two parameters
    shared one name (a SyntaxError); the body already read ``string``,
    ``reload`` and ``cls``.

    Args:
        string: fully qualified dotted name; everything before the last dot is
            the module, the final component is the attribute.
        reload: if True, reload the module before resolving the attribute.

    Returns:
        The resolved attribute (class, function, constant, ...).
    """
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


# Preserve the original (mangled) binding for backward compatibility.
__magic_name__ = get_obj_from_str
def instantiate_from_config(config):
    """Instantiate ``config["target"]`` with ``config["params"]`` as kwargs.

    Args:
        config: mapping with a dotted-path "target" key and an optional
            "params" mapping of constructor keyword arguments.

    Returns:
        The constructed object.

    Raises:
        KeyError: if "target" is missing.
    """
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


# Preserve the original (mangled) binding for backward compatibility.
__magic_name__ = instantiate_from_config
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Build a model from *config*, optionally load weights, and set its mode.

    NOTE(review): restored from a mangled signature with four identically
    named parameters (a SyntaxError); the body already read these names.

    Args:
        config: config mapping passed to ``instantiate_from_config``.
        sd: state dict to load, or None to keep random weights.
        gpu: move the model to CUDA when True.
        eval_mode: put the model in eval mode when True.

    Returns:
        ``{"model": model}`` (kept as a dict for the caller below).
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


# Preserve the original (mangled) binding for backward compatibility.
__magic_name__ = load_model_from_config
def load_model(config, ckpt, gpu, eval_mode):
    """Load a Lightning-style checkpoint and build the configured model.

    NOTE(review): restored from a mangled signature with four identically
    named parameters (a SyntaxError); the body already read these names.

    Args:
        config: config object whose ``.model`` section describes the model.
        ckpt: checkpoint path, or a falsy value to skip loading weights.
        gpu: forwarded to ``load_model_from_config``.
        eval_mode: forwarded to ``load_model_from_config``.

    Returns:
        ``(model, global_step)`` — global_step is None when no checkpoint was given.
    """
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"""loaded model from global step {global_step}.""")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step


# Preserve the original (mangled) binding for backward compatibility.
__magic_name__ = load_model
| 202 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Return a random password of *length* characters.

    NOTE(review): restored — the mangled original read undefined names and
    ``main()`` below already calls ``password_generator``.

    Args:
        length: number of characters to generate (default 8).

    Returns:
        A string of *length* cryptographically random letters, digits and
        punctuation characters.
    """
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))
def alternative_password_generator(chars_incl: str, i: int) -> str:
    """Return a length-*i* password guaranteed to contain all of *chars_incl*.

    The remaining ``i - len(chars_incl)`` characters are split roughly in
    thirds between letters, digits and punctuation, then the result is
    shuffled so the required characters are not clustered at the front.

    NOTE(review): restored — the mangled original read undefined names; the
    per-category picker is inlined here so this function is self-contained.

    Args:
        chars_incl: characters that must appear in the password.
        i: total desired password length (must be >= len(chars_incl)).

    Returns:
        A shuffled password string of length *i*.
    """
    i -= len(chars_incl)
    quotient, remainder = divmod(i, 3)

    def _pick(ctbi: str, k: int) -> str:
        # k cryptographically random picks from ctbi ("characters to be included").
        return "".join(secrets.choice(ctbi) for _ in range(k))

    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + _pick(ascii_letters, quotient + remainder)
        + _pick(digits, quotient)
        + _pick(punctuation, quotient)
    )
    shuffled = list(chars)
    shuffle(shuffled)
    return "".join(shuffled)
# random is a generalised function for letters, characters and numbers
# random is a generalised function for letters, characters and numbers
def random(ctbi: str, i: int) -> str:
    """Return *i* cryptographically random characters drawn from *ctbi*.

    NOTE(review): restored — the mangled original read undefined names; the
    name ``random`` matches the original helper this file's comments describe.
    """
    return "".join(secrets.choice(ctbi) for _ in range(i))
def random_number(ctbi, i):
    """Placeholder exercise stub: pick *i* random digits from *ctbi* (unimplemented).

    NOTE(review): the mangled originals repeated one parameter name, which is
    a SyntaxError; distinct names restored, bodies left as the intended stubs.
    """
    pass  # Put your code here...


def random_letters(ctbi, i):
    """Placeholder exercise stub: pick *i* random letters from *ctbi* (unimplemented)."""
    pass  # Put your code here...


def random_characters(ctbi, i):
    """Placeholder exercise stub: pick *i* random special characters from *ctbi* (unimplemented)."""
    pass  # Put your code here...
def is_strong_password(password: str, min_length: int = 8) -> bool:
    """Check whether *password* meets basic strength requirements.

    NOTE(review): restored — the mangled original repeated one parameter name
    (a SyntaxError) while the body read ``password`` and ``min_length``.

    Args:
        password: candidate password.
        min_length: minimum acceptable length (default 8).

    Returns:
        True iff the password is at least *min_length* characters and contains
        at least one uppercase letter, one lowercase letter, one digit and one
        punctuation character.
    """
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)
    return upper and lower and num and spec_char


# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def main() -> None:
    """Interactive entry point: prompt for a length and the required characters,
    then print a plain and an "alternative" generated password.

    NOTE(review): restored — the mangled original bound throwaway locals and
    called the generators with undefined names; the ``__main__`` guard below
    already calls ``main()``.
    """
    max_length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input(
        "Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(max_length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, max_length),
    )
    print("[If you are thinking of using this passsword, You better save it.]")


if __name__ == "__main__":
    main()
| 366 | import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Map each slow tokenizer class name to its fast counterpart class in ``transformers``.
# NOTE(review): restored the ``logger``/``TOKENIZER_CLASSES`` bindings — the
# mangled original assigned both values to one name (the second assignment
# clobbered the logger) while the code below still read these names.
TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}

# Preserve the original (mangled) binding for backward compatibility.
__lowerCamelCase = TOKENIZER_CLASSES


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow-tokenizer checkpoint(s) into fast ``tokenizer.json`` files under *dump_path*.

    NOTE(review): restored from a mangled signature with four identically
    named parameters (a SyntaxError); the body already read these names.

    Args:
        tokenizer_name: a key of ``TOKENIZER_CLASSES``, or None for all of them.
        checkpoint_name: a single checkpoint id, or None for every canonical
            checkpoint of each tokenizer class.
        dump_path: output directory (org-prefixed checkpoints get a subdirectory).
        force_download: re-download checkpoints instead of using the cache.

    Raises:
        ValueError: if *tokenizer_name* is not a recognized tokenizer class.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f"Loading tokenizer classes: {tokenizer_names}")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")
        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f"=> File names {file_names}")
            # Keep only the fast tokenizer.json outputs.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
logger.info(f"""=> removing {file_name}""" )
if __name__ == "__main__":
    # NOTE(review): restored the ``parser``/``args`` bindings — the mangled
    # original assigned both objects to throwaway names while the statements
    # below still read ``parser`` and ``args``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
    )
    parser.add_argument(
        '''--tokenizer_name''',
        default=None,
        type=str,
        help=(
            F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
            '''download and convert all the checkpoints from AWS.'''
        ),
    )
    parser.add_argument(
        '''--checkpoint_name''',
        default=None,
        type=str,
        help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
    )
    parser.add_argument(
        '''--force_download''',
        action='''store_true''',
        help='''Re-download checkpoints.''',
    )
    args = parser.parse_args()
    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
# Checkpoints exercised by the tokenizer tests below (referenced as TOKENIZER_CHECKPOINTS).
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
# Tiny model used to keep the SavedModel round-trip test fast.
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
class ModelToSave(tf.keras.Model):
    """Minimal Keras model (tokenizer + tiny BERT) used to test SavedModel export of in-graph tokenizers."""

    def __init__(self, tokenizer):
        super().__init__()
        # Store the tokenizer on the instance so `call` can use it inside the graph.
        self.tokenizer = tokenizer
        config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
        self.bert = TFAutoModel.from_config(config)

    def call(self, inputs):
        # Keras dispatches __call__ to `call`; tokenize then run the encoder.
        tokenized = self.tokenizer(inputs)
        out = self.bert(**tokenized)
        return out["pooler_output"]
@require_tf
@require_tensorflow_text
class snake_case__(unittest.TestCase):
    """Checks that the in-graph TFBertTokenizer matches the Python BertTokenizer and survives tf.function / SavedModel."""

    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        # Python tokenizer and TF in-graph tokenizer must produce identical ids/masks.
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors='tf', padding='longest')
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        # Passing pairs as tuples or as text/text_pair lists must be equivalent.
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        # Compiling the tokenizer with tf.function must not change its outputs.
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        # A model embedding the tokenizer must round-trip through SavedModel.
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
| 304 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure for the MobileViT model; optional entries are added only
# when their backend (vision / torch / tf) is available.
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; at runtime the module is lazy.
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase(SchedulerCommonTest):
    """Scheduler tests for DEISMultistepScheduler: config round-trips, step shapes, and full denoising loops."""

    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        """Return the default DEIS config, overridable via keyword arguments."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Verify save_config/from_pretrained round-trips produce identical step outputs."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # round-trip behavior is covered by check_over_configs
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Verify a single step at `time_step` is stable across a config save/load round-trip."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        """Run a 10-step denoising loop and return the final sample."""
        # Only build a default scheduler when the caller did not supply one;
        # re-creating it unconditionally would silently discard the argument.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch_schedulers(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type="deis", solver_order=order, solver_type=solver_type, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        # stepping a half-precision sample must keep it in fp16
        assert sample.dtype == torch.float16
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    """Build a tiny in-memory Dataset with two near-duplicate files and one distinct file."""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class __lowerCamelCase(TestCase):
    """Tests for minhash-based duplicate clustering and dataset deduplication."""

    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        # the two "a"-content files should land in one cluster
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        # one duplicate removed -> two rows remain
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
# Mock download fixture: URL served, its content, and the sha256 used as the cached filename.
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    """Minimal stand-in for requests.Response, exposing only what DownloadManager reads."""

    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        # Serve the whole mock payload as a single chunk.
        return [bytes(CONTENT, "utf-8")]
def mock_request(*args, **kwargs):
    """Replacement for requests.request that always returns the canned MockResponse."""
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager(monkeypatch, urls_type, tmp_path):
    """Download str/list/dict URL inputs through DownloadManager and check cached files + metadata."""
    import requests

    # route all HTTP traffic to the canned mock response
    monkeypatch.setattr(requests, "request", mock_request)
    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    # normalize the three input shapes to parallel lists of (path, url)
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_extract(paths_type, xz_file, text_file):
    """Extract str/list/dict archive inputs and check extracted paths and contents."""
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    # normalize the three input shapes to parallel lists of (extracted, input)
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            # extracted file name is the hash of the archive path (no etag)
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    """Assert `file` yields exactly 4 JSONL records with the expected columns."""
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    """Iterate a tar/zip archive of jsonl files and validate each member."""
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    """Iterate an archive nested inside another archive and validate the inner jsonl files."""
    nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(nested_jsonl_path), start=1):
        # the outer member is itself an archive: iterate it in turn
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    """iter_files must yield only the visible files, in order, skipping hidden ones."""
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 24 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return a representative linear layer of `model` (architecture-dependent attribute path)."""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    # default layout used by bloom-style models
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
import torch
import torch.nn as nn
class _lowercase(nn.Module):
    """Low-rank adapter wrapper: wraps a linear `module` and adds a zero-initialized
    two-layer bottleneck so the wrapped output is initially unchanged."""

    def __init__(self, module, rank):
        super().__init__()
        self.module = module
        # down-projection to `rank`, then up-projection back; no biases
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        # zero the up-projection so the adapter starts as an identity perturbation
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        # base module output plus the (initially zero) adapter contribution
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase(unittest.TestCase):
    """Shared constants and tokenizer setup for the 4-bit quantization tests below."""

    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # tokenizer shared by all subclasses
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
a = AutoTokenizer.from_pretrained(self.model_name )
class _lowercase(_lowercase):
    """4-bit quantization tests: loading, memory footprint, generation quality and dtype guards.
    NOTE(review): the base resolves to the preceding TestCase-derived `_lowercase` class."""

    def setUp(self):
        super().setUp()
        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto")
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit
        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        # all serialization paths must work with a quantization config attached
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()
        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        config = BitsAndBytesConfig()
        config.load_in_4bit = True
        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=config, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        # saving a 4-bit model is not supported — TODO confirm the exact exception type
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name, quantization_config=config, load_in_4bit=True, device_map="auto", bnb_4bit_quant_type="nf4", )

    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype``
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeqaSeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        # keep_in_fp32 modules must stay in float32
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowercase(unittest.TestCase):
    """T5-specific 4-bit tests (dense-relu-dense vs dense-act layouts, keep-in-fp32 handling)."""

    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import TaForConditionalGeneration

        modules = TaForConditionalGeneration._keep_in_fp32_modules
        TaForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        # restore the class attribute for other tests
        TaForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class _lowercase(_lowercase):
    """Checks that 4-bit loading works across model head types and quantizes the right weights.
    NOTE(review): base resolves to the preceding `_lowercase` test class."""

    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto")
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class _lowercase(_lowercase):
    """Checks that the text-generation pipeline works with a 4-bit quantized model.
    NOTE(review): base resolves to the preceding `_lowercase` test class."""

    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16}, max_new_tokens=self.MAX_NEW_TOKENS, )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
def UpperCamelCase_ (self ):
"""simple docstring"""
super().setUp()
def UpperCamelCase_ (self ):
"""simple docstring"""
a = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCamelCase_ , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
a = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
a = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCamelCase_ ) , self.EXPECTED_OUTPUTS )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
def UpperCamelCase_ (self ):
"""simple docstring"""
a = "facebook/opt-350m"
super().setUp()
def UpperCamelCase_ (self ):
"""simple docstring"""
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
a = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCamelCase_ )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
a = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
a = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCamelCase_ ) ):
a = LoRALayer(module.q_proj , rank=16 )
a = LoRALayer(module.k_proj , rank=16 )
a = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
a = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
a = model.forward(**lowerCamelCase_ )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCamelCase_ , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
__A = "gpt2-xl"
__A = 3.3_191_854_854_152_187
| 227 | 0 |
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""", [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(__snake_case, i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0, 4 ), range(4, 7 ), range(7, 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0, 1 ), range(1, 2 ), range(2, 3 )]),
], )
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : Any ) -> List[Any]:
"""simple docstring"""
A__ : List[str] =_distribute_shards(**__snake_case )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""", [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
], )
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : Optional[int], __snake_case : Any ) -> int:
"""simple docstring"""
A__ : Optional[Any] =_split_gen_kwargs(__snake_case, __snake_case )
assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""", [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
], )
def __lowerCamelCase ( __snake_case : Dict, __snake_case : Tuple ) -> int:
"""simple docstring"""
if expected is RuntimeError:
with pytest.raises(__snake_case ):
_number_of_shards_in_gen_kwargs(__snake_case )
else:
A__ : Optional[Any] =_number_of_shards_in_gen_kwargs(__snake_case )
assert out == expected
| 136 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
def lowercase__ ( self : Any , lowerCAmelCase_ : float ) -> float:
'''simple docstring'''
return 0.0
def __lowerCamelCase ( __snake_case : np.ndarray, __snake_case : int ) -> tuple[int | float, int | float]:
"""simple docstring"""
A__ : Tuple =min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
A__ : str =max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def __lowerCamelCase ( __snake_case : FilterType, __snake_case : int ) -> None:
"""simple docstring"""
A__ : Any =512
A__ : int =[1] + [0] * (size - 1)
A__ : int =[filter_type.process(__snake_case ) for item in inputs]
A__ : Union[str, Any] =[0] * (samplerate - size) # zero-padding
outputs += filler
A__ : List[Any] =np.abs(np.fft.fft(__snake_case ) )
A__ : int =20 * np.logaa(__snake_case )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24, samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
# Display within reasonable bounds
A__ : Union[str, Any] =get_bounds(__snake_case, __snake_case )
plt.ylim(max([-80, bounds[0]] ), min([80, bounds[1]] ) )
plt.ylabel("""Gain (dB)""" )
plt.plot(__snake_case )
plt.show()
def __lowerCamelCase ( __snake_case : FilterType, __snake_case : int ) -> None:
"""simple docstring"""
A__ : List[Any] =512
A__ : List[Any] =[1] + [0] * (size - 1)
A__ : Dict =[filter_type.process(__snake_case ) for item in inputs]
A__ : Union[str, Any] =[0] * (samplerate - size) # zero-padding
outputs += filler
A__ : str =np.angle(np.fft.fft(__snake_case ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24, samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
plt.ylim(-2 * pi, 2 * pi )
plt.ylabel("""Phase shift (Radians)""" )
plt.plot(np.unwrap(__snake_case, -2 * pi ) )
plt.show()
| 136 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> list of public symbols.
# FIX(review): the obfuscated original bound this dict to the throwaway name
# `__a`, then *overwrote* `__a` with the optional symbol lists, and finally
# passed an undefined `_import_structure` to `_LazyModule`; restored to the
# canonical transformers lazy-module pattern.
_import_structure = {
    'configuration_speecht5': [
        'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
        'SpeechT5Config',
        'SpeechT5HifiGanConfig',
    ],
    'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
    'processing_speecht5': ['SpeechT5Processor'],
}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Tokenizer needs sentencepiece.
    _import_structure['tokenization_speecht5'] = ['SpeechT5Tokenizer']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code needs torch.
    _import_structure['modeling_speecht5'] = [
        'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SpeechT5ForSpeechToText',
        'SpeechT5ForSpeechToSpeech',
        'SpeechT5ForTextToSpeech',
        'SpeechT5Model',
        'SpeechT5PreTrainedModel',
        'SpeechT5HifiGan',
    ]
if TYPE_CHECKING:
    # Static-analysis-only imports (module/symbol names as in the obfuscated file).
    from .configuration_speechta import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechTaConfig,
        SpeechTaHifiGanConfig,
    )
    from .feature_extraction_speechta import SpeechTaFeatureExtractor
    from .processing_speechta import SpeechTaProcessor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speechta import SpeechTaTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speechta import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechTaForSpeechToSpeech,
            SpeechTaForSpeechToText,
            SpeechTaForTextToSpeech,
            SpeechTaHifiGan,
            SpeechTaModel,
            SpeechTaPreTrainedModel,
        )
else:
    import sys
    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 35 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowercase__ ( snake_case_ :Optional[int] ):
return EnvironmentCommand()
def lowercase__ ( snake_case_ :List[str] ):
return EnvironmentCommand(args.accelerate_config_file )
class _UpperCAmelCase ( _lowerCAmelCase ):
@staticmethod
def a ( _lowercase : ArgumentParser ):
__UpperCAmelCase = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
download_parser.add_argument(
'''--accelerate-config_file''' , default=_lowercase , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=_lowercase )
def __init__( self : Optional[int] , _lowercase : str , *_lowercase : Tuple ):
__UpperCAmelCase = accelerate_config_file
def a ( self : Dict ):
__UpperCAmelCase = '''not installed'''
if is_safetensors_available():
import safetensors
__UpperCAmelCase = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
__UpperCAmelCase = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = __UpperCAmelCase = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__UpperCAmelCase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_lowercase ):
__UpperCAmelCase = load_config_from_file(self._accelerate_config_file ).to_dict()
__UpperCAmelCase = (
'''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(_lowercase , _lowercase )
else F'''\t{accelerate_config}'''
)
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''NA'''
if is_torch_available():
import torch
__UpperCAmelCase = torch.__version__
__UpperCAmelCase = torch.cuda.is_available()
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''NA'''
if is_tf_available():
import tensorflow as tf
__UpperCAmelCase = tf.__version__
try:
# deprecated in v2.1
__UpperCAmelCase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__UpperCAmelCase = bool(tf.config.list_physical_devices('''GPU''' ) )
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
__UpperCAmelCase = flax.__version__
__UpperCAmelCase = jax.__version__
__UpperCAmelCase = jaxlib.__version__
__UpperCAmelCase = jax.lib.xla_bridge.get_backend().platform
__UpperCAmelCase = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F'''{safetensors_version}''',
'''Accelerate version''': F'''{accelerate_version}''',
'''Accelerate config''': F'''{accelerate_config_str}''',
'''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
'''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''',
'''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''',
'''Jax version''': F'''{jax_version}''',
'''JaxLib version''': F'''{jaxlib_version}''',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def a ( _lowercase : str ):
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 332 | 0 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCAmelCase_ : int = logging.get_logger(__name__)
class UpperCamelCase ( _UpperCAmelCase ):
def __init__( self , *UpperCAmelCase__ , **UpperCAmelCase__ ):
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , UpperCAmelCase__ , )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 198 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : List[str] = """linear"""
lowerCAmelCase : int = """cosine"""
lowerCAmelCase : Dict = """cosine_with_restarts"""
lowerCAmelCase : Optional[Any] = """polynomial"""
lowerCAmelCase : Dict = """constant"""
lowerCAmelCase : Any = """constant_with_warmup"""
lowerCAmelCase : Union[str, Any] = """piecewise_constant"""
def UpperCamelCase ( _A : Optimizer , _A : int = -1 )-> Dict:
"""simple docstring"""
return LambdaLR(_A , lambda _A : 1 , last_epoch=_A )
def UpperCamelCase ( _A : Optimizer , _A : int , _A : int = -1 )-> Optional[Any]:
"""simple docstring"""
def lr_lambda(_A : int ):
if current_step < num_warmup_steps:
return float(_A ) / float(max(1.0 , _A ) )
return 1.0
return LambdaLR(_A , _A , last_epoch=_A )
def UpperCamelCase ( _A : Optimizer , _A : str , _A : int = -1 )-> Dict:
"""simple docstring"""
A__ = {}
A__ = step_rules.split("," )
for rule_str in rule_list[:-1]:
A__ , A__ = rule_str.split(":" )
A__ = int(_A )
A__ = float(_A )
A__ = value
A__ = float(rule_list[-1] )
def create_rules_function(_A : Any , _A : Optional[int] ):
def rule_func(_A : int ) -> float:
A__ = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_A ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A__ = create_rules_function(_A , _A )
return LambdaLR(_A , _A , last_epoch=_A )
def UpperCamelCase ( _A : Any , _A : Union[str, Any] , _A : str , _A : str=-1 )-> Tuple:
"""simple docstring"""
def lr_lambda(_A : int ):
if current_step < num_warmup_steps:
return float(_A ) / float(max(1 , _A ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_A , _A , _A )
def UpperCamelCase ( _A : Optimizer , _A : int , _A : int , _A : float = 0.5 , _A : int = -1 )-> Any:
"""simple docstring"""
def lr_lambda(_A : Tuple ):
if current_step < num_warmup_steps:
return float(_A ) / float(max(1 , _A ) )
A__ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_A ) * 2.0 * progress )) )
return LambdaLR(_A , _A , _A )
def UpperCamelCase ( _A : Optimizer , _A : int , _A : int , _A : int = 1 , _A : int = -1 )-> Any:
"""simple docstring"""
def lr_lambda(_A : Tuple ):
if current_step < num_warmup_steps:
return float(_A ) / float(max(1 , _A ) )
A__ = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_A ) * progress) % 1.0) )) )
return LambdaLR(_A , _A , _A )
def UpperCamelCase ( _A : Union[str, Any] , _A : Union[str, Any] , _A : List[str] , _A : Tuple=1E-7 , _A : Dict=1.0 , _A : Union[str, Any]=-1 )-> Any:
"""simple docstring"""
A__ = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(_A : int ):
if current_step < num_warmup_steps:
return float(_A ) / float(max(1 , _A ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A__ = lr_init - lr_end
A__ = num_training_steps - num_warmup_steps
A__ = 1 - (current_step - num_warmup_steps) / decay_steps
A__ = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_A , _A , _A )
# Maps each SchedulerType member to its factory function; consumed by the
# `get_scheduler` dispatcher below.
# NOTE(review): as written, every referenced name here is undefined in this
# file — the obfuscation renamed all factory functions above to
# `UpperCamelCase` and renamed the `SchedulerType` enum likewise, so evaluating
# this assignment raises NameError at import time.
UpperCAmelCase_ : Any = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def UpperCamelCase ( _A : Union[str, SchedulerType] , _A : Optimizer , _A : Optional[str] = None , _A : Optional[int] = None , _A : Optional[int] = None , _A : int = 1 , _A : float = 1.0 , _A : int = -1 , )-> Union[str, Any]:
"""simple docstring"""
A__ = SchedulerType(_A )
A__ = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_A , last_epoch=_A )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_A , step_rules=_A , last_epoch=_A )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_A , num_warmup_steps=_A , last_epoch=_A )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_A , num_warmup_steps=_A , num_training_steps=_A , num_cycles=_A , last_epoch=_A , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_A , num_warmup_steps=_A , num_training_steps=_A , power=_A , last_epoch=_A , )
return schedule_func(
_A , num_warmup_steps=_A , num_training_steps=_A , last_epoch=_A )
| 198 | 1 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase :
    """Config/inputs builder for the UMT5 test suite (obfuscated UMTaModelTester).

    NOTE(review): machine-obfuscated — as written, every ``__init__`` parameter
    shares the name ``UpperCAmelCase`` (a SyntaxError), while the body reads
    the original names (``parent``, ``batch_size``, ``encoder_seq_length`` …).
    The attributes stored on ``self`` keep their original meaning. Code kept
    byte-identical; comments/docstrings only.
    """
    def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any]=99 , UpperCAmelCase : str=13 , UpperCAmelCase : List[str]=7 , UpperCAmelCase : str=9 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : str=True , UpperCAmelCase : Any=False , UpperCAmelCase : Union[str, Any]=32 , UpperCAmelCase : List[str]=5 , UpperCAmelCase : Tuple=4 , UpperCAmelCase : Union[str, Any]=37 , UpperCAmelCase : int=8 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Any=0.0_0_2 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : Union[str, Any]=0 , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[Any]=None , ) -> Union[str, Any]:
        lowerCamelCase__ : int = parent
        lowerCamelCase__ : Any = batch_size
        lowerCamelCase__ : Optional[int] = encoder_seq_length
        lowerCamelCase__ : int = decoder_seq_length
        # For common tests
        lowerCamelCase__ : List[str] = self.decoder_seq_length
        lowerCamelCase__ : Optional[int] = is_training
        lowerCamelCase__ : List[Any] = use_attention_mask
        lowerCamelCase__ : Optional[Any] = use_labels
        lowerCamelCase__ : Union[str, Any] = vocab_size
        lowerCamelCase__ : Union[str, Any] = hidden_size
        lowerCamelCase__ : Optional[Any] = num_hidden_layers
        lowerCamelCase__ : Any = num_attention_heads
        lowerCamelCase__ : str = d_ff
        lowerCamelCase__ : Optional[Any] = relative_attention_num_buckets
        lowerCamelCase__ : Any = dropout_rate
        lowerCamelCase__ : Any = initializer_factor
        lowerCamelCase__ : Union[str, Any] = eos_token_id
        lowerCamelCase__ : List[str] = pad_token_id
        lowerCamelCase__ : List[str] = decoder_start_token_id
        lowerCamelCase__ : List[Any] = None
        lowerCamelCase__ : Optional[Any] = decoder_layers
    def A_ ( self : List[Any] ) -> int:
        """Large (pretrained) config used by some integration checks."""
        return TaConfig.from_pretrained('google/umt5-base' )
    def A_ ( self : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : str=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[Any]=None , ) -> List[str]:
        """Fill in default attention/head masks for any that were not provided."""
        if attention_mask is None:
            lowerCamelCase__ : Optional[Any] = input_ids.ne(config.pad_token_id )
        if decoder_attention_mask is None:
            lowerCamelCase__ : Optional[Any] = decoder_input_ids.ne(config.pad_token_id )
        if head_mask is None:
            lowerCamelCase__ : int = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCAmelCase )
        if decoder_head_mask is None:
            lowerCamelCase__ : Dict = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
        if cross_attn_head_mask is None:
            lowerCamelCase__ : Dict = torch.ones(
                config.num_decoder_layers , config.num_attention_heads , device=UpperCAmelCase )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def A_ ( self : str ) -> List[str]:
        """Build (config, inputs_dict) with random token ids clamped away from the pad id."""
        lowerCamelCase__ : Any = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
        lowerCamelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        lowerCamelCase__ : List[str] = input_ids.clamp(self.pad_token_id + 1 )
        lowerCamelCase__ : Union[str, Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
        lowerCamelCase__ : Dict = self.get_config()
        lowerCamelCase__ : Tuple = config.num_attention_heads
        lowerCamelCase__ : Any = self.prepare_inputs_dict(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        return config, input_dict
    def A_ ( self : Tuple ) -> Union[str, Any]:
        """Alias used by the common-test machinery."""
        lowerCamelCase__ , lowerCamelCase__ : Dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def A_ ( self : Optional[int] ) -> List[str]:
        """Pipeline-test config: larger vocab (166) so special tokens fit."""
        return TaConfig(
            vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def A_ ( self : Union[str, Any] ) -> Dict:
        """Tiny config derived from the tester's hyper-parameters."""
        return TaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def A_ ( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : Dict , ) -> str:
        """Forward the full encoder-decoder model and check output/past shapes."""
        lowerCamelCase__ : Dict = UMTaModel(config=UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        lowerCamelCase__ : Optional[int] = model(
            input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , decoder_attention_mask=UpperCAmelCase , )
        lowerCamelCase__ : Any = model(input_ids=UpperCAmelCase , decoder_input_ids=UpperCAmelCase )
        lowerCamelCase__ : Dict = result.last_hidden_state
        lowerCamelCase__ : Any = result.past_key_values
        lowerCamelCase__ : List[Any] = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
        self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(UpperCAmelCase ) , config.num_layers )
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0] ) , 4 )
    def A_ ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , ) -> Optional[int]:
        """Decoder-only cache test: outputs with past_key_values must match a full re-run."""
        lowerCamelCase__ : List[Any] = UMTaModel(config=UpperCAmelCase ).get_decoder().to(UpperCAmelCase ).eval()
        # first forward pass
        lowerCamelCase__ : Tuple = model(UpperCAmelCase , use_cache=UpperCAmelCase )
        lowerCamelCase__ : List[Any] = model(UpperCAmelCase )
        lowerCamelCase__ : int = model(UpperCAmelCase , use_cache=UpperCAmelCase )
        self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) )
        self.parent.assertTrue(len(UpperCAmelCase ) == len(UpperCAmelCase ) + 1 )
        lowerCamelCase__ , lowerCamelCase__ : Dict = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        lowerCamelCase__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append to next input_ids and
        lowerCamelCase__ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowerCamelCase__ : List[str] = model(UpperCAmelCase )['last_hidden_state']
        lowerCamelCase__ : str = model(UpperCAmelCase , past_key_values=UpperCAmelCase )['last_hidden_state']
        # select random slice
        lowerCamelCase__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowerCamelCase__ : Tuple = output_from_no_past[:, -1, random_slice_idx].detach()
        lowerCamelCase__ : List[str] = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
    def A_ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , ) -> Tuple:
        """fp16 forward pass must not produce NaNs."""
        lowerCamelCase__ : Union[str, Any] = UMTaModel(config=UpperCAmelCase ).to(UpperCAmelCase ).half().eval()
        lowerCamelCase__ : Optional[int] = model(**UpperCAmelCase )['last_hidden_state']
        self.parent.assertFalse(torch.isnan(UpperCAmelCase ).any().item() )
@require_torch
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, unittest.TestCase ):
    """Common-test entry point for UMT5 models.

    NOTE(review): machine-obfuscated — the three mixin bases are all named
    ``__UpperCamelCase`` (undefined here; originally ModelTesterMixin,
    GenerationTesterMixin, PipelineTesterMixin) and every class attribute is
    named ``UpperCAmelCase__`` so later assignments shadow earlier ones. Code
    kept byte-identical; comments/docstrings only.
    """
    # all model classes / generative classes / pipeline mapping under test
    UpperCAmelCase__ = (
        (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
    )
    UpperCAmelCase__ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
    UpperCAmelCase__ = (
        {
            """conversational""": UMTaForConditionalGeneration,
            """feature-extraction""": UMTaModel,
            """summarization""": UMTaForConditionalGeneration,
            """text2text-generation""": UMTaForConditionalGeneration,
            """translation""": UMTaForConditionalGeneration,
            """question-answering""": UMTaForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    # common-test feature flags (original names lost in obfuscation)
    UpperCAmelCase__ = True
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False
    UpperCAmelCase__ = True
    UpperCAmelCase__ = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    UpperCAmelCase__ = [0.8, 0.9]
    def A_ ( self : Union[str, Any] ) -> List[Any]:
        """Instantiate the shared model tester (shadowed by the later `A_` defs)."""
        lowerCamelCase__ : Union[str, Any] = UMTaModelTester(self )
    @unittest.skip('Test has a segmentation fault on torch 1.8.0' )
    def A_ ( self : Tuple ) -> int:
        """ONNX export smoke test (skipped; segfaults on torch 1.8.0)."""
        lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : Tuple = UMTaModel(config_and_inputs[0] ).to(UpperCAmelCase )
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                UpperCAmelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=UpperCAmelCase , opset_version=9 , input_names=['input_ids', 'decoder_input_ids'] , )
    @unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
    def A_ ( self : Tuple ) -> Optional[Any]:
        """Run the fp16 forward check from the tester (GPU only)."""
        lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fpaa_forward(*UpperCAmelCase )
    def A_ ( self : List[Any] ) -> str:
        """Head masking: zeroing all heads of a layer must zero its attention weights in generate()."""
        lowerCamelCase__ : int = ['encoder_attentions', 'decoder_attentions', 'cross_attentions']
        lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        lowerCamelCase__ : Any = config_and_inputs[0]
        lowerCamelCase__ : Any = UMTaForConditionalGeneration(UpperCAmelCase ).eval()
        model.to(UpperCAmelCase )
        lowerCamelCase__ : Tuple = {
            'head_mask': torch.zeros(config.num_layers , config.num_heads , device=UpperCAmelCase ),
            'decoder_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ),
            'cross_attn_head_mask': torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCAmelCase ),
        }
        for attn_name, (name, mask) in zip(UpperCAmelCase , head_masking.items() ):
            lowerCamelCase__ : Union[str, Any] = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                lowerCamelCase__ : Union[str, Any] = torch.ones(
                    config.num_decoder_layers , config.num_heads , device=UpperCAmelCase )
            lowerCamelCase__ : Tuple = model.generate(
                config_and_inputs[1]['input_ids'] , num_beams=1 , max_length=3 , output_attentions=UpperCAmelCase , return_dict_in_generate=UpperCAmelCase , **UpperCAmelCase , )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            lowerCamelCase__ : Union[str, Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
    @unittest.skip('Does not work on the tiny model as we keep hitting edge cases.' )
    def A_ ( self : Optional[Any] ) -> Optional[Any]:
        """Disabled common test (edge cases on the tiny checkpoint)."""
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration checks for the google/umt5-small checkpoint.

    NOTE(review): local variable names inside the test were mangled by an automated
    rewrite (`lowerCamelCase__` shadows itself), so references like `tokenizer`,
    `input_ids` and `model` no longer resolve as written.
    """

    @slow
    @unittest.skip(
        'Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged' )
    def A_ ( self : Any ) -> int:
        """Tokenizes multilingual sentinel prompts, verifies the ids, then greedy-generates and compares decodes."""
        lowerCamelCase__ : Optional[Any] = UMTaForConditionalGeneration.from_pretrained('google/umt5-small' , return_dict=UpperCAmelCase ).to(UpperCAmelCase )
        lowerCamelCase__ : List[str] = AutoTokenizer.from_pretrained('google/umt5-small' , use_fast=UpperCAmelCase , legacy=UpperCAmelCase )
        # Prompts in several languages, each containing <extra_id_N> sentinel tokens.
        lowerCamelCase__ : Dict = [
            'Bonjour monsieur <extra_id_0> bien <extra_id_1>.',
            'No se como puedo <extra_id_0>.',
            'This is the reason why we <extra_id_0> them.',
            'The <extra_id_0> walks in <extra_id_1>, seats',
            'A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.',
        ]
        lowerCamelCase__ : Tuple = tokenizer(UpperCAmelCase , return_tensors='pt' , padding=UpperCAmelCase ).input_ids
        # Reference token ids (right-padded with 0) for the batch above.
        # fmt: off
        lowerCamelCase__ : Any = torch.tensor(
            [
            [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
            [ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
            [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
            [ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
            [ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
            ] )
        # fmt: on
        torch.testing.assert_allclose(UpperCAmelCase , UpperCAmelCase )
        lowerCamelCase__ : Optional[int] = model.generate(input_ids.to(UpperCAmelCase ) )
        # Expected decoded strings, including sentinel and <pad> tokens.
        lowerCamelCase__ : List[Any] = [
            '<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
            '<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
            '<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
        ]
        lowerCamelCase__ : Union[str, Any] = tokenizer.batch_decode(UpperCAmelCase )
        self.assertEqual(UpperCAmelCase , UpperCAmelCase )
| 50 |
from __future__ import annotations
# Accumulates every complete board configuration found by the solver.
# (An automated rename had destroyed the name; `solve` appends to `solution`
# and the driver prints `len(solution)`, so the original name is restored.)
solution = []
def is_safe(board, row, column):
    """Return True if a queen can be placed at (row, column) on `board`.

    Queens are placed row by row (see `solve`), so only rows above `row` can
    already hold a queen: checking the row, the column and the two upward
    diagonals is sufficient.

    Note: the original definition used duplicate parameter names (a
    SyntaxError) and a mangled function name; the name `is_safe` is what the
    solver actually calls.
    """
    n = len(board)
    # Same row.
    for i in range(n):
        if board[row][i] == 1:
            return False
    # Same column.
    for i in range(n):
        if board[i][column] == 1:
            return False
    # Upper-left diagonal.
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # Upper-right diagonal.
    for i, j in zip(range(row, -1, -1), range(column, n)):
        if board[i][j] == 1:
            return False
    return True
def solve(board, row):
    """Place queens row by row by backtracking; record and print every full solution.

    The function always explores every column of `row`, so all solutions are
    enumerated; the return value is only used to stop at the base case.

    Fix: the original appended the *live* board to `solution`, so after
    backtracking every stored solution was an all-zero board — a snapshot copy
    is stored instead. (Duplicate/mangled parameter names were also repaired.)
    """
    if row >= len(board):
        # Complete placement: store a snapshot and pretty-print it.
        solution.append([r[:] for r in board])
        printboard(board)
        print()
        return True
    for column in range(len(board)):
        if is_safe(board, row, column):
            board[row][column] = 1  # place a queen ...
            solve(board, row + 1)
            board[row][column] = 0  # ... and backtrack
    return False
def printboard(board):
    """Print the board, 'Q' for a queen and '.' for an empty square.

    Output format is one row per line, cells separated (and trailed) by a
    single space, matching the original script's output.
    """
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()
# n = int(input("The no. of queens"))
# Board size: the classic 8-queens instance.
n = 8
# Empty n x n board; 1 marks a queen.
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 127 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint to a PyTorch state dict.

    Args:
        tf_checkpoint_path: path to the TF checkpoint.
        bert_config_file: JSON config describing the architecture.
        pytorch_dump_path: where to `torch.save` the converted weights.

    (The original definition had duplicate parameter names — a SyntaxError —
    and a mangled function name; `convert_tf_checkpoint_to_pytorch` is the
    name invoked by the CLI block below.)
    """
    # Initialise PyTorch model from the JSON architecture description.
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: all three paths are required.
    # (Restored `parser`/`args` names — the mangled `A = ...` bindings left
    # the later `parser.add_argument` / `args.*` references undefined.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 357 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
# Restored constant names — the function bodies below reference PREFIX and
# MODEL_MAPPING, but an automated rename had collapsed all three to `A`.
logger = logging.get_logger(__name__)

# Root URL for the publicly hosted OpenAI Jukebox weight files.
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"

# Checkpoint shards required per model: the shared VQ-VAE plus three prior levels.
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}
def replace_key(key):
    """Apply the one-off Jukebox -> Transformers key renames.

    Handles everything not covered by the structural regexes in
    `fix_jukebox_keys`: conv1d aliases, conditioner/prior prefixes, embedding
    and layer-norm naming. Returns the (possibly unchanged) key.

    (Restored from the mangled original, where every `key = key.replace(...)`
    assignment had lost its target, making the rewrites no-ops.)
    """
    # Deeply nested `.model.N` conv layers get explicit conv1d names.
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rename every key of an OpenAI Jukebox `state_dict` to the HF naming scheme.

    Keys are first matched against the structural VQ-VAE / prior-conditioner
    patterns and rewritten positionally, then passed through `replace_key` for
    the remaining one-off renames. Keys that fail to match `model_state_dict`
    (by name or by shape, with `key_prefix` prepended) are reported and kept
    under their original name.

    Args:
        state_dict: raw checkpoint weights.
        model_state_dict: target model's state dict (used as a name/shape oracle).
        key_prefix: "vqvae" or "priors.N", prepended when validating names.
        mapping: dict updated in place with new-key -> original-key entries.

    Returns:
        dict of renamed weights.

    (Restored bindings: the mangled original assigned every intermediate to
    `__lowerCAmelCase`, while the conditions below reference the real regex
    names, so the function could not run as written.)
    """
    new_dict = {}
    import re

    # VQ-VAE encoder: conv-in layers, resnet sub-blocks, final projection.
    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    # VQ-VAE decoder: mirror image of the encoder.
    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    # Prior conditioner blocks.
    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key

        # One-off renames that are not positional.
        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download, rename and load the OpenAI Jukebox shards into a HF JukeboxModel.

    Downloads any missing `.pth.tar` shards for `model_name`, normalises their
    keys (via `fix_jukebox_keys`), loads the VQ-VAE plus the three priors, and
    saves the converted model and a key-mapping JSON under
    `pytorch_dump_folder_path`.

    Returns the (remaining) per-prior weight dicts.
    """
    # Download any shard we do not already have on disk.
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        # NOTE(review): the ".b"/".w"/"cond.model." rename targets were lost in
        # the mangled original; restored from the upstream conversion script —
        # confirm against transformers' convert_jukebox.py before shipping.
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        # First shard is the shared VQ-VAE; the rest map to priors in reverse order.
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    # CLI entry point. (Restored `parser`/`args` names — the mangled `A = ...`
    # bindings left the later references undefined.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 259 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def UpperCAmelCase__ (_A : int):
    """Return the Möbius function mu(n) for the given integer.

    mu(n) = 1 for square-free n with an even number of prime factors,
    -1 for square-free n with an odd number, and 0 otherwise.

    Fix: the body referenced `_lowerCamelCase`, which was never defined — the
    parameter `_A` is what must be factorised.
    """
    factors = prime_factors(_A)
    if is_square_free(factors):
        # Square-free: sign depends on the parity of the factor count.
        return -1 if len(factors) % 2 else 1
    return 0
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 188 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
# Context passage fed to the question-answering tool in the tests below.
# NOTE(review): the constant name was mangled; the tests reference
# `__SCREAMING_SNAKE_CASE` (double underscore), which does not match this
# single-underscore name — reconcile before running.
_SCREAMING_SNAKE_CASE : Optional[int] = '''
Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.
In March 2021, Hugging Face raised $40 million in a Series B funding round.[3]
On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]
'''
class a ( unittest.TestCase , __snake_case ):
    """Tests for the 'text-question-answering' tool (local and remote variants).

    NOTE(review): names were mangled by an automated rewrite — the base class
    `__snake_case` is presumably `ToolTesterMixin` (imported above), the four
    methods all share the name `UpperCamelCase` so only the last survives at
    class-creation time, and `lowerCamelCase_` assignments never bind
    `self.tool` / `self.remote_tool`. Restore the original names before use.
    """

    def UpperCamelCase ( self : int ) -> Optional[int]:
        """Set up: load the tool locally and its remote counterpart."""
        lowerCamelCase_ = load_tool('text-question-answering' )
        self.tool.setup()
        lowerCamelCase_ = load_tool('text-question-answering' , remote=__SCREAMING_SNAKE_CASE )

    def UpperCamelCase ( self : Dict ) -> Tuple:
        """Local tool, positional arguments."""
        lowerCamelCase_ = self.tool(__SCREAMING_SNAKE_CASE , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(__SCREAMING_SNAKE_CASE , 'launched the BigScience Research Workshop' )

    def UpperCamelCase ( self : List[Any] ) -> Optional[int]:
        """Remote tool, positional arguments."""
        lowerCamelCase_ = self.remote_tool(__SCREAMING_SNAKE_CASE , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(__SCREAMING_SNAKE_CASE , 'launched the BigScience Research Workshop' )

    def UpperCamelCase ( self : List[str] ) -> Optional[int]:
        """Local tool, keyword arguments."""
        lowerCamelCase_ = self.tool(text=__SCREAMING_SNAKE_CASE , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(__SCREAMING_SNAKE_CASE , 'launched the BigScience Research Workshop' )

    def UpperCamelCase ( self : Union[str, Any] ) -> int:
        """Remote tool, keyword arguments."""
        lowerCamelCase_ = self.remote_tool(text=__SCREAMING_SNAKE_CASE , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(__SCREAMING_SNAKE_CASE , 'launched the BigScience Research Workshop' )
| 183 | 0 |
"""simple docstring"""
def solution(__snake_case=4_000_000) -> int:
    """Project Euler problem 2: sum of the even Fibonacci numbers <= the limit.

    Args:
        __snake_case: inclusive upper bound for Fibonacci terms (default 4,000,000).

    Returns:
        The sum of all even-valued Fibonacci numbers not exceeding the bound.

    Fix: the definition was named `_lowercase` while the `__main__` block calls
    `solution()`; the callable name is restored. The list-building loop is
    replaced by an equivalent constant-memory two-variable walk.
    """
    total = 0
    a, b = 0, 1
    while a <= __snake_case:
        if a % 2 == 0:
            total += a
        a, b = b, a + b
    return total
if __name__ == "__main__":
    # Print the result for the default 4,000,000 limit.
    # (Trailing dataset residue that made this line a syntax error was removed.)
    print(F"""{solution() = }""")
"""simple docstring"""
__snake_case : Any = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
__snake_case : Union[str, Any] = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
__snake_case : int = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
__snake_case : Dict = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
__snake_case : Dict = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
__snake_case : Any = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
__snake_case : Tuple = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
__snake_case : str = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
] | 58 | 0 |
from __future__ import annotations
import requests
# Reddit post attributes that may be requested via `wanted_data`.
# (Restored the name `valid_terms`, which `get_subreddit_data` actually reads;
# an automated rename had collapsed it to `__snake_case`.)
valid_terms = set(
    '''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data(subreddit, limit=1, age="new", wanted_data=None) -> dict:
    """Fetch the newest posts of a subreddit via Reddit's public JSON endpoint.

    Args:
        subreddit: subreddit name (without the r/ prefix).
        limit: number of posts to return.
        age: listing to query ("new", "hot", ...).
        wanted_data: optional list of attributes (must be in `valid_terms`);
            when empty, the raw post objects are returned.

    Returns:
        dict mapping post index -> post data (raw, or filtered to `wanted_data`).

    Raises:
        ValueError: if `wanted_data` contains an unknown attribute.
        requests.HTTPError: when Reddit responds with 429 (rate limited).

    (Restored parameter names — the original definition used the same mangled
    name for all four parameters, a SyntaxError — matching the real names the
    body already references.)
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        # No filter requested: hand back the raw post objects.
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
# Demo entry point — performs a live network request to reddit.com.
if __name__ == "__main__":
    # If you get HTTP error 429, you are being rate limited. Try again after some time.
    print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
| 310 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    """The RoBERTa Model transformer with early exiting (DeeRoBERTa). """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    """DeeBERT early-exit transformer with RoBERTa config and embeddings.

    Identical to `DeeBertModel` except that the BERT embeddings are replaced
    by `RobertaEmbeddings`. (Class name, `config_class` and
    `base_model_prefix` were destroyed by an automated rename; restored —
    `DeeRobertaModel` is the name the classifier below instantiates.)
    """

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        # Swap in RoBERTa-style embeddings over the DeeBERT backbone.
        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    """Sequence classifier on top of DeeRobertaModel with highway (early-exit) heads.

    Restored from a mangled original: the class/attribute/method names had been
    destroyed by an automated rename while the body still referenced the real
    names (`self.roberta`, `self.dropout`, `self.classifier`, ...).
    """

    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        """Run the backbone (or catch an early exit), compute main and highway losses.

        Returns `(loss,) + (logits, ...)` when labels are given; in eval mode the
        entropies and the exit layer index are appended.
        """
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # A highway head decided to exit early: its payload replaces the outputs.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                # Train only the highway heads (exclude the final highway, of course).
                outputs = (sum(highway_losses[:-1]),) + outputs
            else:
                outputs = (loss,) + outputs

        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 310 | 1 |
"""simple docstring"""
import socket
def main():
    """Receive a file from a server on this host, port 12312, saving it as 'Received_file'.

    Fixes: the definition was named `_SCREAMING_SNAKE_CASE` while the
    `__main__` guard calls `main()`, and the write call referenced an
    undefined `_A` instead of the received chunk.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                break  # server closed the connection: transfer complete
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')
# Run the client when executed as a script (connects to a live server).
if __name__ == "__main__":
    main()
| 358 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCamelCase : Optional[int] = float("nan")
class A:
    """Tee-like stream that mirrors writes to real stdout and appends a
    tqdm-stripped copy to a log file.

    The instance keeps ``self.stdout`` (the real stream) and ``self.file``
    (the log, opened in append mode); unknown attributes are forwarded to
    the real stdout so the object can stand in for ``sys.stdout``.
    """

    def __init__(self, A_):
        # the original body already reads self.stdout/self.file; the obfuscated
        # version assigned to throwaway locals, so nothing was ever stored
        self.stdout = sys.stdout
        self.file = open(A_, 'a')

    def __getattr__(self, A_):
        # delegate everything we don't define (flush, isatty, ...) to stdout
        return getattr(self.stdout, A_)

    def a__(self, A_):
        self.stdout.write(A_)
        # strip tqdm codes (anything up to a carriage return on each line)
        self.file.write(re.sub(r'^.*\r', '', A_, 0, re.M))

    # ``print`` calls ``.write`` on its target stream; keep both names bound
    write = a__


# main() instantiates this class as ``Tee(...)``
Tee = A
def _SCREAMING_SNAKE_CASE(max_width=80, full_python_path=False):
    """Return the current command line, reconstructed so it can be replayed,
    wrapped with shell line-continuations at *max_width* columns.

    Args:
        max_width: column at which to wrap the command.
        full_python_path: keep the full interpreter path instead of just its
            basename.
    """
    # NOTE: the obfuscated definition had two parameters with the same name
    # (a SyntaxError); the body dictates these names.
    cmd = []

    # deal with critical env vars
    env_keys = ['CUDA_VISIBLE_DEVICES']
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"""{key}={val}""")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split('/')[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ''
    while len(cmd) > 0:
        current_line += f"""{cmd.pop(0)} """
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ''
    return "\\\n".join(lines)


# readable alias; process_results() calls the function by this name
get_original_command = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(args, output_dir):
    """Normalize ``args.base_cmd`` (unwrap line continuations, force our own
    --output_dir and --overwrite_output_dir) and return it as an argv list
    prefixed with the current interpreter.

    Mutates ``args.base_cmd`` in place.
    """
    # unwrap multi-line input into one line (the obfuscated version discarded
    # each re.sub result instead of writing it back)
    args.base_cmd = re.sub(r'[\\\n]+', ' ', args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r'--output_dir\s+[^\s]+', '', args.base_cmd)
    args.base_cmd += f""" --output_dir {output_dir}"""
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r'--overwrite_output_dir\s+', '', args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)


# readable alias; main() calls the function by this name
get_base_command = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    """Run *cmd* once, save its stdout/stderr under *output_dir* and return the
    wanted metrics read from ``output_dir/all_results.json``.

    Returns ``{target_metric_key: nan}`` when the subprocess fails.
    """
    # enable to debug everything but the run itself, to do it fast and see the progress
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 1_00) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.2222_2222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print('STDOUT', result.stdout)
        print('STDERR', result.stderr)

    # save the streams, one log pair per variation
    prefix = variation.replace(' ', '-')
    with open(Path(output_dir) / f"""log.{prefix}.stdout.txt""", 'w') as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"""log.{prefix}.stderr.txt""", 'w') as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print('failed')
        return {target_metric_key: nan}

    with io.open(f"""{output_dir}/all_results.json""", 'r', encoding='utf-8') as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


# readable alias; process_run() calls the function by this name
process_run_single = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    """Run one variation *repeat_times* times, print a one-line summary and
    return the averaged metrics dict (or a nan entry when every run failed).
    """
    results = []
    metrics = []
    preamble = f"""{id}: {variation:<{longest_variation_len}}"""
    outcome = f"""{preamble}: """
    # always fetch the target metric alongside the reported ones
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    # \33[2K erases the tqdm leftovers on the line
    outcome = f"""\33[2K\r{outcome}"""
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"""{outcome} {mean_target}"""
        if len(results) > 1:
            # the obfuscated version rounded the wrong variable here
            results_str += f""" {tuple(round(x, 2) for x in results)}"""
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}


# readable alias; main() calls the function by this name
process_run = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE():
    """Return a multi-line report of software versions and GPU hardware.

    Requires CUDA (calls ``torch.cuda.get_device_properties``).
    """
    # the f-string below reads ``properties``; the obfuscated version stored
    # the device properties under a throwaway name
    properties = torch.cuda.get_device_properties(torch.device('cuda'))
    return f"""
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""


# readable alias; process_results() calls the function by this name
get_versions = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    """Build and print the final benchmark report (github + console markdown
    tables) from the per-variation metric dicts.
    """
    df = pd.DataFrame(results)
    variation_key = 'variation'
    diff_key = 'diff_%'

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis='columns',
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis='columns')  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis='columns')

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace('_', '<br>'), axis='columns')
    df_console = df.rename(lambda c: c.replace('_', '\n'), axis='columns')

    report = ['', 'Copy between the cut-here-lines and paste as is to github or a forum']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt='.2f')]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt='.2f')]

    print('\n\n'.join(report))


# readable alias; main() calls the function by this name
process_results = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE():
    """Parse the benchmark CLI, run every variation and print/store the report."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--base-cmd', default=None, type=str, required=True, help='Base cmd', )
    parser.add_argument(
        '--variations', default=None, type=str, nargs='+', required=True, help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'', )
    parser.add_argument(
        '--base-variation', default=None, type=str, help='Baseline variation to compare to. if None the minimal target value will be used to compare against', )
    parser.add_argument(
        '--target-metric-key', default=None, type=str, required=True, help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second', )
    parser.add_argument(
        '--report-metric-keys', default='', type=str, help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples', )
    parser.add_argument(
        '--repeat-times', default=1, type=int, help='How many times to re-run each variation - an average will be reported', )
    parser.add_argument(
        '--output_dir', default='output_benchmark', type=str, help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked', )
    parser.add_argument(
        '--verbose', default=False, action='store_true', help='Whether to show the outputs of each run or just the benchmark progress', )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r'\|', x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(' '.join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"""benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt"""
    print(f"""\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt""")
    print(f"""and this script's output is also piped into {report_fn}""")
    # redirect stdout through the Tee so the report file captures everything
    sys.stdout = Tee(report_fn)

    print(f"""\n*** Running {len(variations)} benchmarks:""")
    print(f"""Base command: {" ".join(base_cmd)}""")

    variation_key = 'variation'
    results = []
    for id, variation in enumerate(tqdm(variations, desc='Total completion: ', leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


# the __main__ block below calls the entry point by this name
main = _SCREAMING_SNAKE_CASE


if __name__ == "__main__":
    main()
| 208 | 0 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def a(model):
    """Build the HF encoder (Donut Swin) and decoder (MBart) configs from the
    original DonutModel's config.

    Returns:
        (encoder_config, decoder_config)
    """
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        # vocab size follows the original tokenizer (may include added tokens)
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
    return encoder_config, decoder_config


# readable alias; convert_donut_checkpoint() calls the function by this name
get_configs = a
def a(name):
    """Map one original Donut parameter name to its HF VisionEncoderDecoder name."""
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "encoder.layernorm.weight"
    if name == "encoder.norm.bias":
        name = "encoder.layernorm.bias"
    return name


# keep the helper reachable under a readable name for the function below
rename_key = a


def a(orig_state_dict, model):
    """Rewrite an original Donut state dict in place to HF naming, splitting the
    fused qkv projections into separate query/key/value tensors."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            # per-head projection width for this block
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict


# readable alias; convert_donut_checkpoint() calls the function by this name
convert_state_dict = a
def a(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original Donut checkpoint to a HF VisionEncoderDecoderModel,
    verify its outputs on an example document, and optionally save / push it.
    """
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        # NOTE(review): missing leading "<" — kept byte-identical to the source;
        # confirm against the upstream conversion script before "fixing"
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    # verify patch embeddings
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")


# readable alias; the __main__ block calls the function by this name
convert_donut_checkpoint = a
if __name__ == "__main__":
    # CLI entry point: the obfuscated version created the parser under one name
    # and used it under another
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 227 |
def a(number: int) -> str:
    """Return the two's-complement binary representation of a non-positive int.

    The width is the number of bits needed for abs(number), plus one sign bit.

    Raises:
        ValueError: if *number* is positive.
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"  # sign bit
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
# Universal gas constant R in J/(K*mol); the function body refers to it by the
# readable name, so alias it.
lowercase_ = 8.314_4598
UNIVERSAL_GAS_CONSTANT = lowercase_


def UpperCamelCase__(temperature, molar_mass):
    """Return the root-mean-square speed of gas molecules: sqrt(3RT/M).

    Args:
        temperature: absolute temperature in kelvin (must be >= 0).
        molar_mass: molar mass in kg/mol (must be > 0).
    """
    if temperature < 0:
        raise Exception('Temperature cannot be less than 0 K')
    if molar_mass <= 0:
        raise Exception('Molar mass cannot be less than or equal to 0 kg/mol')
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


# readable alias used by the example below
rms_speed_of_molecule = UpperCamelCase__


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example: nitrogen gas (M = 28 g/mol ... units follow the original script)
    temperature = 3_0_0
    molar_mass = 2_8
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(F"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 194 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class A_ ( unittest.TestCase ):
    """Tests for the TF GradientAccumulator and create_optimizer utilities."""

    def assertListAlmostEqual(self, list1, list2, tol):
        # helper: element-wise approximate comparison of two equal-length lists
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        # accumulating three gradients should sum them and bump the step counter
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            # a mismatched number of gradients must be rejected
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        # reset the eager context so a fresh MirroredStrategy can be configured
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices('CPU')
        if len(physical_devices) == 1:
            # split the single CPU into two logical devices for MirroredStrategy
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()])
        devices = tf.config.list_logical_devices(device_type='CPU')
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        # with a tiny lr over a few steps the variable should be ~unchanged
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])

    # the obfuscated file bound all three methods to ``_snake_case``; keep that
    # name pointing at the last definition, matching the original binding
    _snake_case = testGradientAccumulatorDistributionStrategy
| 194 | 1 |
"""simple docstring"""
def A_(_lowercase=100):
    """Project Euler 29: return the number of distinct terms a**b for
    2 <= a <= _lowercase and 2 <= b <= _lowercase.
    """
    limit = _lowercase + 1  # exclusive upper bound for both base and exponent
    collect_powers = {base**exponent for base in range(2, limit) for exponent in range(2, limit)}
    return len(collect_powers)


# the __main__ block below calls the function by its original name
solution = A_


if __name__ == "__main__":
    print("Number of terms ", solution(int(str(input()).strip())))
| 66 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A =logging.get_logger(__name__)
logger = A  # readable alias; ``A`` is reassigned to the dicts below

# map of EfficientNet variant name -> keras model constructor; the conversion
# function looks this up as ``model_classes``
A ={
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
model_classes = A
# Per-variant hyperparameters: classifier hidden dim, width/depth multipliers,
# input resolution, dropout rate and the block indices that need explicit
# depthwise padding. The conversion functions look this up as ``CONFIG_MAP``.
A ={
    'b0': {
        'hidden_dim': 12_80,
        'width_coef': 1.0,
        'depth_coef': 1.0,
        'image_size': 2_24,
        'dropout_rate': 0.2,
        'dw_padding': [],
    },
    'b1': {
        'hidden_dim': 12_80,
        'width_coef': 1.0,
        'depth_coef': 1.1,
        'image_size': 2_40,
        'dropout_rate': 0.2,
        'dw_padding': [16],
    },
    'b2': {
        'hidden_dim': 14_08,
        'width_coef': 1.1,
        'depth_coef': 1.2,
        'image_size': 2_60,
        'dropout_rate': 0.3,
        'dw_padding': [5, 8, 16],
    },
    'b3': {
        'hidden_dim': 15_36,
        'width_coef': 1.2,
        'depth_coef': 1.4,
        'image_size': 3_00,
        'dropout_rate': 0.3,
        'dw_padding': [5, 18],
    },
    'b4': {
        'hidden_dim': 17_92,
        'width_coef': 1.4,
        'depth_coef': 1.8,
        'image_size': 3_80,
        'dropout_rate': 0.4,
        'dw_padding': [6],
    },
    'b5': {
        'hidden_dim': 20_48,
        'width_coef': 1.6,
        'depth_coef': 2.2,
        'image_size': 4_56,
        'dropout_rate': 0.4,
        'dw_padding': [13, 27],
    },
    'b6': {
        'hidden_dim': 23_04,
        'width_coef': 1.8,
        'depth_coef': 2.6,
        'image_size': 5_28,
        'dropout_rate': 0.5,
        'dw_padding': [31],
    },
    'b7': {
        'hidden_dim': 25_60,
        'width_coef': 2.0,
        'depth_coef': 3.1,
        'image_size': 6_00,
        'dropout_rate': 0.5,
        'dw_padding': [18],
    },
}
CONFIG_MAP = A  # readable alias used by the conversion functions below
def snake_case_(model_name):
    """Build an EfficientNetConfig for *model_name* from CONFIG_MAP and attach
    the ImageNet-1k id2label mapping downloaded from the hub.
    """
    config = EfficientNetConfig()
    # NOTE(review): the obfuscation dropped the attribute targets; these names
    # follow the upstream conversion script — confirm against EfficientNetConfig
    config.hidden_dim = CONFIG_MAP[model_name]['hidden_dim']
    config.width_coefficient = CONFIG_MAP[model_name]['width_coef']
    config.depth_coefficient = CONFIG_MAP[model_name]['depth_coef']
    config.image_size = CONFIG_MAP[model_name]['image_size']
    config.dropout_rate = CONFIG_MAP[model_name]['dropout_rate']
    config.depthwise_padding = CONFIG_MAP[model_name]['dw_padding']

    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    config.num_labels = 1_0_0_0
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


# readable alias; convert_efficientnet_checkpoint() calls the function by this name
get_efficientnet_config = snake_case_
def snake_case_():
    """Download and return the standard COCO test image used for verification."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image


# readable alias; convert_efficientnet_checkpoint() calls the function by this name
prepare_img = snake_case_
def snake_case_(model_name):
    """Build the EfficientNetImageProcessor matching *model_name*'s resolution."""
    size = CONFIG_MAP[model_name]['image_size']
    preprocessor = EfficientNetImageProcessor(
        size={'height': size, 'width': size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.4785_3944, 0.473_2864, 0.4743_4163],
        # the original keras model does not center-crop
        do_center_crop=False,
    )
    return preprocessor


# readable alias; convert_efficientnet_checkpoint() calls the function by this name
convert_image_processor = snake_case_
def snake_case_(original_param_names):
    """Build a dict mapping original TF parameter names to HF parameter names.

    Only names present in *original_param_names* are mapped; the classifier
    head keys are always added.
    """
    # block ids like "1a", "2b", ... extracted from e.g. "block1a_dwconv/..."
    block_names = [v.split('_')[0].split('block')[1] for v in original_param_names if v.startswith('block')]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    # original block id -> sequential HF block index (as a string)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight'))
    rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight'))
    rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias'))
    rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean'))
    rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var'))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var"))
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight"))
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean"))
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var"))

        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight"))
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var"))

    rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight'))
    rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight'))
    rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias'))
    rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean'))
    rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var'))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            # backbone weights live under the ``efficientnet.`` prefix in HF
            key_mapping[item[0]] = 'efficientnet.' + item[1]

    key_mapping['predictions/kernel:0'] = 'classifier.weight'
    key_mapping['predictions/bias:0'] = 'classifier.bias'
    return key_mapping


# readable alias; convert_efficientnet_checkpoint() calls the function by this name
rename_keys = snake_case_
def snake_case_(hf_params, tf_params, key_mapping):
    """Copy the TF weights into the HF state dict in place, transposing conv
    kernels to torch's layout along the way.
    """
    for key, value in tf_params.items():
        if "normalization" in key:
            # preprocessing-layer statistics are not part of the HF model
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            # TF conv kernels are HWIO; torch expects OIHW
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            # dense kernels only need a transpose
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


# readable alias; convert_efficientnet_checkpoint() calls the function by this name
replace_params = snake_case_
@torch.no_grad()
def snake_case_(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Convert a keras EfficientNet checkpoint to HF, verify the logits match,
    and optionally save the result locally and/or push it to the hub.
    """
    original_model = model_classes[model_name](
        include_top=True,
        weights='imagenet',
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1_0_0_0,
        classifier_activation='softmax',
    )

    # collect both trainable and non-trainable TF parameters by name
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print('Converting parameters...')
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors='pt')

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]['image_size']
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1E-3), "The predicted logits are not the same."
    print('Model outputs match!')

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        hub_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(hub_name)
        hf_model.push_to_hub(hub_name)


# readable alias; the __main__ block calls the function by this name
convert_efficientnet_checkpoint = snake_case_
if __name__ == "__main__":
    # CLI entry point: the obfuscated version created the parser under one name
    # and used it under another
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='b0',
        type=str,
        help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='hf_model',
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--save_model', action='store_true', help='Save model to local')
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 34 | 0 |
'''simple docstring'''
import re
def _a(_lowercase: str) -> bool:
    """Return True if *_lowercase* is a valid Sri Lankan mobile phone number.

    Accepted prefixes: 0, 94, +94 or 0094, followed by 7, one of
    0/1/2/4/5/6/7/8, an optional '-' or space, and seven digits.
    """
    # the obfuscated version compiled the pattern but then searched the phone
    # string within itself; search with the compiled pattern instead
    pattern = re.compile(
        R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''')
    return bool(re.search(pattern, _lowercase))


# readable alias; the __main__ block calls the function by this name
is_sri_lankan_phone_number = _a


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
'''simple docstring'''
def _a(_lowercase: int = 600851475143) -> int:
    """Return the largest prime factor of *_lowercase* (Project Euler 3).

    Raises:
        TypeError: if the input cannot be cast to int.
        ValueError: if the input is not >= 1.
    """
    try:
        n = int(_lowercase)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    largest_factor = 1
    i = 2
    # trial division; n shrinks as factors are removed
    while i * i <= n:
        while n % i == 0:
            largest_factor = i
            n //= i
        i += 1
    if n > 1:
        # whatever remains is itself prime
        largest_factor = n
    return int(largest_factor)


# the __main__ block below calls the function by its original name
solution = _a


if __name__ == "__main__":
    print(f"""{solution() = }""")
def a__(_UpperCamelCase: int) -> list[int]:
    """Return all primes <= *_UpperCamelCase* using the sieve of Eratosthenes.

    Raises:
        ValueError: if the argument is not a positive integer.
    """
    # Fix: obfuscation collapsed every local to `__lowerCamelCase`, leaving
    # `num`, `primes` and `p` unbound; restored the intended sieve.
    num = _UpperCamelCase
    if num <= 0:
        raise ValueError('''Input must be a positive integer''')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # strike out every multiple of p, starting at p*p (smaller
            # multiples were already struck by smaller primes)
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the input was bound to `a_` but read as `user_num`, and the sieve
    # in this file is named `a__`, not `prime_sieve_eratosthenes`.
    user_num = int(input("""Enter a positive integer: """).strip())
    print(a__(user_num))
| 330 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __lowerCAmelCase ( lowerCAmelCase__ ):
    """Output wrapper returned by the VAE ``encode`` step.

    NOTE(review): obfuscation mangled this dataclass — originally the single
    field was ``latent_dist: "DiagonalGaussianDistribution"``; the literal
    ``42`` below is a placeholder left by the scrambler, not a real value.
    """
    lowerCAmelCase__ = 42
class __lowerCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
    """Variational autoencoder with a KL-regularised latent space (AutoencoderKL pattern).

    Encodes images into a diagonal-Gaussian latent distribution and decodes
    latents back to images, with optional *slicing* (process the batch one
    sample at a time) and *tiling* (process overlapping spatial tiles and
    blend the seams) to bound memory use on large inputs.

    NOTE(review): this file is machine-obfuscated — every local is named
    ``__lowerCamelCase``, every parameter ``__UpperCAmelCase`` (methods with
    more than one argument therefore declare duplicate parameter names, a
    SyntaxError), and every method is named ``lowerCamelCase`` so later
    definitions shadow earlier ones. The original identifiers must be
    restored before this class can execute; the comments below document the
    intended behaviour only.
    """
    # Originally `_supports_gradient_checkpointing = True`.
    lowerCAmelCase__ = True
    # Constructor: builds Encoder/Decoder stacks plus the 1x1 quant /
    # post-quant convolutions, and precomputes the tile sizes used by the
    # tiled code paths. 0.18_215 is the usual latent scaling factor default.
    @register_to_config
    def __init__( self , __UpperCAmelCase = 3 , __UpperCAmelCase = 3 , __UpperCAmelCase = ("DownEncoderBlock2D",) , __UpperCAmelCase = ("UpDecoderBlock2D",) , __UpperCAmelCase = (64,) , __UpperCAmelCase = 1 , __UpperCAmelCase = "silu" , __UpperCAmelCase = 4 , __UpperCAmelCase = 32 , __UpperCAmelCase = 32 , __UpperCAmelCase = 0.18_215 , ):
        '''simple docstring'''
        super().__init__()
        # pass init params to Encoder
        __lowerCamelCase = Encoder(
            in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , )
        # pass init params to Decoder
        __lowerCamelCase = Decoder(
            in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , act_fn=__UpperCAmelCase , )
        # NOTE(review): `nn.Convad` is the mangled spelling of `nn.Conv2d`
        # (1x1 quant_conv / post_quant_conv in the original).
        __lowerCamelCase = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
        __lowerCamelCase = nn.Convad(__UpperCAmelCase , __UpperCAmelCase , 1 )
        __lowerCamelCase = False
        __lowerCamelCase = False
        # only relevant if vae tiling is enabled
        __lowerCamelCase = self.config.sample_size
        __lowerCamelCase = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size , (list, tuple) )
            else self.config.sample_size
        )
        __lowerCamelCase = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
        __lowerCamelCase = 0.25
    # Gradient-checkpointing toggle: flips the flag on Encoder/Decoder modules.
    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=False ):
        '''simple docstring'''
        if isinstance(__UpperCAmelCase , (Encoder, Decoder) ):
            __lowerCamelCase = value
    # Enable tiled encode/decode of large images.
    def lowerCamelCase ( self , __UpperCAmelCase = True ):
        '''simple docstring'''
        __lowerCamelCase = use_tiling
    # Disable tiling (delegates to the enable method with False).
    def lowerCamelCase ( self ):
        '''simple docstring'''
        self.enable_tiling(__UpperCAmelCase )
    # Enable sliced (per-sample) VAE computation.
    def lowerCamelCase ( self ):
        '''simple docstring'''
        __lowerCamelCase = True
    # Disable sliced VAE computation.
    def lowerCamelCase ( self ):
        '''simple docstring'''
        __lowerCamelCase = False
    # Property returning {qualified_name: attention processor} for all
    # submodules exposing `set_processor`.
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def lowerCamelCase ( self ):
        '''simple docstring'''
        __lowerCamelCase = {}
        def fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
            if hasattr(__UpperCAmelCase , '''set_processor''' ):
                __lowerCamelCase = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase )
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        return processors
    # Installs a single processor (or a dict keyed by qualified name) on
    # every attention module; validates the dict size against the count.
    def lowerCamelCase ( self , __UpperCAmelCase ):
        '''simple docstring'''
        __lowerCamelCase = len(self.attn_processors.keys() )
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(__UpperCAmelCase ) != count:
            raise ValueError(
                F"""A dict of processors was passed, but the number of processors {len(__UpperCAmelCase )} does not match the"""
                F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )
        def fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
            if hasattr(__UpperCAmelCase , '''set_processor''' ):
                if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                    module.set_processor(__UpperCAmelCase )
                else:
                    module.set_processor(processor.pop(F"""{name}.processor""" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F"""{name}.{sub_name}""" , __UpperCAmelCase , __UpperCAmelCase )
        for name, module in self.named_children():
            fn_recursive_attn_processor(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
    # Resets all attention modules to the default AttnProcessor.
    def lowerCamelCase ( self ):
        '''simple docstring'''
        self.set_attn_processor(AttnProcessor() )
    # encode: image -> posterior latent distribution; routes through the
    # tiled path for oversized inputs and the sliced path for batches.
    @apply_forward_hook
    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
        '''simple docstring'''
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(__UpperCAmelCase , return_dict=__UpperCAmelCase )
        if self.use_slicing and x.shape[0] > 1:
            __lowerCamelCase = [self.encoder(__UpperCAmelCase ) for x_slice in x.split(1 )]
            __lowerCamelCase = torch.cat(__UpperCAmelCase )
        else:
            __lowerCamelCase = self.encoder(__UpperCAmelCase )
        __lowerCamelCase = self.quant_conv(__UpperCAmelCase )
        __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=__UpperCAmelCase )
    # _decode: single-latent decode; routes oversized latents to tiled_decode.
    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
        '''simple docstring'''
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(__UpperCAmelCase , return_dict=__UpperCAmelCase )
        __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase )
        __lowerCamelCase = self.decoder(__UpperCAmelCase )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=__UpperCAmelCase )
    # decode: public entry point; applies per-sample slicing when enabled.
    @apply_forward_hook
    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
        '''simple docstring'''
        if self.use_slicing and z.shape[0] > 1:
            __lowerCamelCase = [self._decode(__UpperCAmelCase ).sample for z_slice in z.split(1 )]
            __lowerCamelCase = torch.cat(__UpperCAmelCase )
        else:
            __lowerCamelCase = self._decode(__UpperCAmelCase ).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=__UpperCAmelCase )
    # blend_v: linearly cross-fades the bottom rows of tile `a` into the top
    # rows of tile `b` over `blend_extent` rows (vertical seam removal).
    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        '''simple docstring'''
        __lowerCamelCase = min(a.shape[2] , b.shape[2] , __UpperCAmelCase )
        for y in range(__UpperCAmelCase ):
            __lowerCamelCase = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    # blend_h: same cross-fade along the width axis (horizontal seam removal).
    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        '''simple docstring'''
        __lowerCamelCase = min(a.shape[3] , b.shape[3] , __UpperCAmelCase )
        for x in range(__UpperCAmelCase ):
            __lowerCamelCase = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    # tiled_encode: encode overlapping tiles, blend seams, concatenate.
    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
        '''simple docstring'''
        __lowerCamelCase = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
        __lowerCamelCase = int(self.tile_latent_min_size * self.tile_overlap_factor )
        __lowerCamelCase = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        __lowerCamelCase = []
        for i in range(0 , x.shape[2] , __UpperCAmelCase ):
            __lowerCamelCase = []
            for j in range(0 , x.shape[3] , __UpperCAmelCase ):
                __lowerCamelCase = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                __lowerCamelCase = self.encoder(__UpperCAmelCase )
                __lowerCamelCase = self.quant_conv(__UpperCAmelCase )
                row.append(__UpperCAmelCase )
            rows.append(__UpperCAmelCase )
        __lowerCamelCase = []
        for i, row in enumerate(__UpperCAmelCase ):
            __lowerCamelCase = []
            for j, tile in enumerate(__UpperCAmelCase ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase )
                if j > 0:
                    __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) )
        __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 )
        __lowerCamelCase = DiagonalGaussianDistribution(__UpperCAmelCase )
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=__UpperCAmelCase )
    # tiled_decode: mirror of tiled_encode for the latent -> image direction.
    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = True ):
        '''simple docstring'''
        __lowerCamelCase = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
        __lowerCamelCase = int(self.tile_sample_min_size * self.tile_overlap_factor )
        __lowerCamelCase = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        __lowerCamelCase = []
        for i in range(0 , z.shape[2] , __UpperCAmelCase ):
            __lowerCamelCase = []
            for j in range(0 , z.shape[3] , __UpperCAmelCase ):
                __lowerCamelCase = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                __lowerCamelCase = self.post_quant_conv(__UpperCAmelCase )
                __lowerCamelCase = self.decoder(__UpperCAmelCase )
                row.append(__UpperCAmelCase )
            rows.append(__UpperCAmelCase )
        __lowerCamelCase = []
        for i, row in enumerate(__UpperCAmelCase ):
            __lowerCamelCase = []
            for j, tile in enumerate(__UpperCAmelCase ):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    __lowerCamelCase = self.blend_v(rows[i - 1][j] , __UpperCAmelCase , __UpperCAmelCase )
                if j > 0:
                    __lowerCamelCase = self.blend_h(row[j - 1] , __UpperCAmelCase , __UpperCAmelCase )
                result_row.append(tile[:, :, :row_limit, :row_limit] )
            result_rows.append(torch.cat(__UpperCAmelCase , dim=3 ) )
        __lowerCamelCase = torch.cat(__UpperCAmelCase , dim=2 )
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=__UpperCAmelCase )
    # forward: encode -> sample (or mode) of the posterior -> decode.
    def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , ):
        '''simple docstring'''
        __lowerCamelCase = sample
        __lowerCamelCase = self.encode(__UpperCAmelCase ).latent_dist
        if sample_posterior:
            __lowerCamelCase = posterior.sample(generator=__UpperCAmelCase )
        else:
            __lowerCamelCase = posterior.mode()
        __lowerCamelCase = self.decode(__UpperCAmelCase ).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=__UpperCAmelCase )
| 330 | 1 |
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCamelCase__ :
    """Builds tiny DistilBert configs/inputs and asserts model output shapes.

    NOTE(review): machine-obfuscation collapsed every ``self.<attr> = ...``
    assignment in ``__init__`` to the bare local ``SCREAMING_SNAKE_CASE_`` and
    renamed all parameters to ``_A`` — the ``create_and_check_*`` methods
    therefore declare six duplicate ``_A`` parameters (a SyntaxError), and
    ``prepare_config_and_inputs`` reads ``self.batch_size`` etc. that are
    never set. Original attribute/parameter names must be restored.
    """
    def __init__( self , _A , ) -> Optional[Any]:
        # Intended: store `parent` plus the small hyper-parameters below as
        # instance attributes (batch_size=13, seq_length=7, vocab_size=99, ...).
        SCREAMING_SNAKE_CASE_ = parent
        SCREAMING_SNAKE_CASE_ = 13
        SCREAMING_SNAKE_CASE_ = 7
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = False
        SCREAMING_SNAKE_CASE_ = True
        SCREAMING_SNAKE_CASE_ = 99
        SCREAMING_SNAKE_CASE_ = 32
        SCREAMING_SNAKE_CASE_ = 2
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = 37
        SCREAMING_SNAKE_CASE_ = '''gelu'''
        SCREAMING_SNAKE_CASE_ = 0.1
        SCREAMING_SNAKE_CASE_ = 0.1
        SCREAMING_SNAKE_CASE_ = 512
        SCREAMING_SNAKE_CASE_ = 16
        SCREAMING_SNAKE_CASE_ = 2
        SCREAMING_SNAKE_CASE_ = 0.02
        SCREAMING_SNAKE_CASE_ = 3
        SCREAMING_SNAKE_CASE_ = 4
        SCREAMING_SNAKE_CASE_ = None
    # Builds random input_ids/masks/labels plus a DistilBertConfig.
    def _UpperCamelCase ( self ) -> Dict:
        SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE_ = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE_ = random_attention_mask([self.batch_size, self.seq_length] )
        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = None
        SCREAMING_SNAKE_CASE_ = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices )
        SCREAMING_SNAKE_CASE_ = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Each create_and_check_* method below instantiates one TF head on the
    # tiny config and asserts its output shape.
    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A ) -> int:
        SCREAMING_SNAKE_CASE_ = TFDistilBertModel(config=_A )
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        SCREAMING_SNAKE_CASE_ = model(_A )
        SCREAMING_SNAKE_CASE_ = [input_ids, input_mask]
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A ) -> Union[str, Any]:
        SCREAMING_SNAKE_CASE_ = TFDistilBertForMaskedLM(config=_A )
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A ) -> str:
        SCREAMING_SNAKE_CASE_ = TFDistilBertForQuestionAnswering(config=_A )
        SCREAMING_SNAKE_CASE_ = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
        }
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A ) -> Optional[int]:
        SCREAMING_SNAKE_CASE_ = self.num_labels
        SCREAMING_SNAKE_CASE_ = TFDistilBertForSequenceClassification(_A )
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE_ = self.num_choices
        SCREAMING_SNAKE_CASE_ = TFDistilBertForMultipleChoice(_A )
        SCREAMING_SNAKE_CASE_ = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
        SCREAMING_SNAKE_CASE_ = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
        SCREAMING_SNAKE_CASE_ = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
        }
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ = self.num_labels
        SCREAMING_SNAKE_CASE_ = TFDistilBertForTokenClassification(_A )
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        SCREAMING_SNAKE_CASE_ = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Repackages prepare_config_and_inputs() into the common-test dict form.
    def _UpperCamelCase ( self ) -> int:
        SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
        ((SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_) , (SCREAMING_SNAKE_CASE_)) = config_and_inputs
        SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """unittest suite wiring the tiny DistilBert tester into the common
    TFModelTesterMixin / PipelineTesterMixin machinery.

    NOTE(review): ``setUp`` references ``TFDistilBertModelTester``, a name
    that does not exist in this obfuscated file (the tester class above was
    renamed ``UpperCamelCase__``) — restore the original class names before
    running.
    """
    UpperCAmelCase_ =(
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    UpperCAmelCase_ =(
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase_ =False
    UpperCAmelCase_ =False
    def _UpperCamelCase ( self ) -> Union[str, Any]:
        SCREAMING_SNAKE_CASE_ = TFDistilBertModelTester(self )
        SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_A , dim=37 )
    def _UpperCamelCase ( self ) -> Optional[int]:
        self.config_tester.run_common_tests()
    # Each test below delegates to the corresponding create_and_check_* helper.
    def _UpperCamelCase ( self ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*_A )
    def _UpperCamelCase ( self ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*_A )
    def _UpperCamelCase ( self ) -> Optional[int]:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*_A )
    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A )
    def _UpperCamelCase ( self ) -> Tuple:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A )
    def _UpperCamelCase ( self ) -> str:
        SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*_A )
    # Smoke-test loading the first pretrained checkpoint from the hub.
    @slow
    def _UpperCamelCase ( self ) -> List[str]:
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            SCREAMING_SNAKE_CASE_ = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class UpperCamelCase__ ( unittest.TestCase ):
    """Integration test: runs the real ``distilbert-base-uncased`` checkpoint
    on a fixed 6-token input and compares the first 3x3 hidden-state slice
    against reference values (atol=1e-4)."""
    @slow
    def _UpperCamelCase ( self ) -> List[str]:
        SCREAMING_SNAKE_CASE_ = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        SCREAMING_SNAKE_CASE_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
        SCREAMING_SNAKE_CASE_ = model(_A )[0]
        # Expected last_hidden_state shape: (batch=1, seq=6, hidden=768).
        SCREAMING_SNAKE_CASE_ = [1, 6, 768]
        self.assertEqual(output.shape , _A )
        SCREAMING_SNAKE_CASE_ = tf.constant(
            [
                [
                    [0.1926_1885, -0.1373_2955, 0.411_9799],
                    [0.2215_0156, -0.0742_2661, 0.3903_7204],
                    [0.2275_6018, -0.089_6414, 0.370_1467],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 )
| 363 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def A__ ( __lowerCamelCase ):
    """Build a ``BitConfig`` for the given timm model name, wired with the
    ImageNet-1k id<->label mappings downloaded from the hub.

    Fixes: the obfuscated version collapsed `repo_id`/`filename` into one
    variable (passing the model name to hf_hub_download twice), used the
    function parameter instead of the loop key in the dict comprehension,
    and mangled the `id2label`/`label2id` keyword names.
    """
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # BiT checkpoints use weight-standardized convolutions.
    conv_layer = '''std_conv''' if '''bit''' in __lowerCamelCase else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=10_00, id2label=id2label, label2id=label2id, )
    return config
def A__ ( __lowerCamelCase ):
    """Translate a timm BiT state-dict key into its HuggingFace equivalent.

    Fix: the obfuscated version read an undefined `name` and discarded every
    `replace` result; the parameter is now bound to `name` and each rewrite
    is kept.
    """
    name = __lowerCamelCase
    if "stem.conv" in name:
        name = name.replace('''stem.conv''', '''bit.embedder.convolution''' )
    if "blocks" in name:
        name = name.replace('''blocks''', '''layers''' )
    if "head.fc" in name:
        name = name.replace('''head.fc''', '''classifier.1''' )
    if name.startswith('''norm''' ):
        name = '''bit.''' + name
    # Anything not already under `bit.` or the classifier lives in the encoder.
    if "bit" not in name and "classifier" not in name:
        name = '''bit.encoder.''' + name
    return name
def A__ ( ):
    """Download the standard COCO test image used for conversion sanity checks.

    Fix: the obfuscated version passed the undefined `__lowerCamelCase` to
    `requests.get` and returned the undefined `im`.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
# NOTE(review): obfuscation collapsed every local into `SCREAMING_SNAKE_CASE_`
# and declared the parameter `__lowerCamelCase` three times (a SyntaxError),
# so this function cannot run as written. Intended flow: build the config,
# load the timm checkpoint, rename its keys, load them into
# BitForImageClassification, rebuild the timm eval transform as a
# BitImageProcessor, verify pixel values and logits agree, then save and/or
# push to the hub. Original variable names must be restored before use.
@torch.no_grad()
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ):
    SCREAMING_SNAKE_CASE_ = get_config(__lowerCamelCase )
    # load original model from timm
    SCREAMING_SNAKE_CASE_ = create_model(__lowerCamelCase, pretrained=__lowerCamelCase )
    timm_model.eval()
    # load state_dict of original model
    SCREAMING_SNAKE_CASE_ = timm_model.state_dict()
    for key in state_dict.copy().keys():
        SCREAMING_SNAKE_CASE_ = state_dict.pop(__lowerCamelCase )
        SCREAMING_SNAKE_CASE_ = val.squeeze() if '''head''' in key else val
    # load HuggingFace model
    SCREAMING_SNAKE_CASE_ = BitForImageClassification(__lowerCamelCase )
    model.eval()
    model.load_state_dict(__lowerCamelCase )
    # create image processor
    SCREAMING_SNAKE_CASE_ = create_transform(**resolve_data_config({}, model=__lowerCamelCase ) )
    SCREAMING_SNAKE_CASE_ = transform.transforms
    # Map timm interpolation names onto PIL resampling enums.
    SCREAMING_SNAKE_CASE_ = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    SCREAMING_SNAKE_CASE_ = BitImageProcessor(
        do_resize=__lowerCamelCase, size={'''shortest_edge''': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=__lowerCamelCase, crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]}, do_normalize=__lowerCamelCase, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    SCREAMING_SNAKE_CASE_ = prepare_img()
    SCREAMING_SNAKE_CASE_ = transform(__lowerCamelCase ).unsqueeze(0 )
    SCREAMING_SNAKE_CASE_ = processor(__lowerCamelCase, return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(__lowerCamelCase, __lowerCamelCase )
    # verify logits
    with torch.no_grad():
        SCREAMING_SNAKE_CASE_ = model(__lowerCamelCase )
        SCREAMING_SNAKE_CASE_ = outputs.logits
    print('''Logits:''', logits[0, :3] )
    print('''Predicted class:''', model.config.idalabel[logits.argmax(-1 ).item()] )
    SCREAMING_SNAKE_CASE_ = timm_model(__lowerCamelCase )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
        print(F'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(__lowerCamelCase )
        processor.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        print(F'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(F'''ybelkada/{model_name}''' )
        processor.push_to_hub(F'''ybelkada/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )
    # Fix: parser/args were bound to `__UpperCAmelCase` but read back as
    # `parser`/`args` (NameError), and `convert_bit_checkpoint` does not exist
    # in this obfuscated file — the converter above is named `A__`.
    args = parser.parse_args()
    A__(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 257 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Resolve the repository root so the `utils/` directory (which contains
# check_copies.py) can be imported regardless of the working directory.
# Fix: both module-level constants were bound to `__a` but referenced below
# as `git_repo_path` and `REFERENCE_CODE`, raising NameError on import.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = """ def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias
    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
    """
class UpperCAmelCase ( unittest.TestCase ):
    '''Tests for utils/check_copies.py: copy-consistency of "# Copied from"
    blocks and localized-README model-list conversion.

    NOTE(review): obfuscation collapsed locals to `lowercase__` and parameters
    to `__lowerCAmelCase` (duplicated four times in check_copy_consistency —
    a SyntaxError), and `setUp` never assigns `self.transformer_dir`.
    Original identifiers must be restored before this suite can run.
    '''
    # Creates a temp copy of modeling_bert.py to run the copy checker against.
    def _lowerCAmelCase( self ) -> str:
        lowercase__ : List[Any] = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , '''models/bert/''' ) )
        lowercase__ : int = self.transformer_dir
        shutil.copy(
            os.path.join(__lowerCAmelCase , '''src/transformers/models/bert/modeling_bert.py''' ) , os.path.join(self.transformer_dir , '''models/bert/modeling_bert.py''' ) , )
    # tearDown: restore the constant and remove the temp tree.
    def _lowerCAmelCase( self ) -> Tuple:
        lowercase__ : Any = '''src/transformers'''
        shutil.rmtree(self.transformer_dir )
    # Writes `class_code` under `comment`, black-formats it, and asserts the
    # checker finds it consistent (or fixes it when overwrite_result is set).
    def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ) -> List[str]:
        lowercase__ : Dict = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            lowercase__ : Union[str, Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        lowercase__ : int = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
        lowercase__ : List[Any] = black.format_str(__lowerCAmelCase , mode=__lowerCAmelCase )
        lowercase__ : List[str] = os.path.join(self.transformer_dir , '''new_code.py''' )
        with open(__lowerCAmelCase , '''w''' , newline='''\n''' ) as f:
            f.write(__lowerCAmelCase )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(__lowerCAmelCase ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=__lowerCAmelCase )
            with open(__lowerCAmelCase , '''r''' ) as f:
                self.assertTrue(f.read() , __lowerCAmelCase )
    # The found BertLMPredictionHead source should equal REFERENCE_CODE.
    def _lowerCAmelCase( self ) -> Dict:
        lowercase__ : Optional[int] = check_copies.find_code_in_transformers('''models.bert.modeling_bert.BertLMPredictionHead''' )
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
    # Exercises base / renamed / long-name / overwrite copy-consistency cases.
    def _lowerCAmelCase( self ) -> str:
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , REFERENCE_CODE + '''\n''' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead''' , '''BertLMPredictionHead''' , __lowerCAmelCase , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , re.sub('''Bert''' , '''TestModel''' , __lowerCAmelCase ) , )
        # Copy consistency with a really long name
        lowercase__ : Any = '''TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , F"""{long_class_name}LMPredictionHead""" , re.sub('''Bert''' , __lowerCAmelCase , __lowerCAmelCase ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel''' , '''TestModelLMPredictionHead''' , __lowerCAmelCase , overwrite_result=re.sub('''Bert''' , '''TestModel''' , __lowerCAmelCase ) , )
    # Checks README model-list conversion into the zh-hans localized format.
    def _lowerCAmelCase( self ) -> Tuple:
        lowercase__ : Optional[int] = check_copies.LOCALIZED_READMES['''README_zh-hans.md''']
        lowercase__ : Any = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
            ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
            ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
            ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'''
            ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'''
            ''' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
            ''' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'''
            ''' method has been applied to compress GPT2 into'''
            ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
            ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
            ''' Multilingual BERT into'''
            ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
            ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'''
            ''' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'''
            ''' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'''
            ''' Luong, Quoc V. Le, Christopher D. Manning.'''
        )
        lowercase__ : List[Any] = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
            ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
        )
        lowercase__ : Dict = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
            ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'''
            ''' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'''
            ''' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'''
            ''' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'''
            ''' method has been applied to compress GPT2 into'''
            ''' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'''
            ''' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'''
            ''' Multilingual BERT into'''
            ''' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'''
            ''' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'''
            ''' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'''
            ''' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'''
            ''' Christopher D. Manning 发布。\n'''
        )
        lowercase__ , lowercase__ : Tuple = check_copies.convert_to_localized_md(
            __lowerCAmelCase , __lowerCAmelCase , localized_readme['''format_model_list'''] )
        self.assertFalse(__lowerCAmelCase )
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
        lowercase__ , lowercase__ : Tuple = check_copies.convert_to_localized_md(
            __lowerCAmelCase , __lowerCAmelCase , localized_readme['''format_model_list'''] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(__lowerCAmelCase )
        lowercase__ : List[Any] = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'''
            ''' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'''
            ''' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'''
            ''' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'''
        )
        lowercase__ : Optional[int] = (
            '''1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'''
            ''' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
        )
        lowercase__ : Dict = (
            '''1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'''
            ''' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'''
            ''' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'''
            ''' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'''
        )
        lowercase__ , lowercase__ : int = check_copies.convert_to_localized_md(
            __lowerCAmelCase , __lowerCAmelCase , localized_readme['''format_model_list'''] )
        # Check if the model link is synchronized.
        self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
| 198 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the CTRL model package.  The dict below was
# mangled into a throwaway name (`__a`) while later code read
# `_import_structure`, and the final `sys.modules` swap (the whole point of
# the `_LazyModule` pattern) had been dropped — both restored here.
_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

# PyTorch modeling classes are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

# TensorFlow modeling classes are only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports ...
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    # ... while at runtime the module is replaced by a lazy proxy that only
    # imports submodules on first attribute access.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 198 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A(unittest.TestCase):
    """Tests for the framework-agnostic tensor helpers in `transformers.utils`.

    Every method in the original had the same mangled name (so each definition
    shadowed the previous one and none was `test_`-discoverable) and bodies
    referenced an undefined `SCREAMING_SNAKE_CASE` local.  Each helper
    (transpose / reshape / squeeze / expand_dims) is checked against the
    reference NumPy behaviour for NumPy, PyTorch, TensorFlow and Flax inputs.
    """

    def test_flatten_dict(self):
        # Nested config-style dict and its expected dotted-key flattening.
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 311 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """Return the relative divergence distance of the point c = x + yi.

    Iterates z -> z**2 + c up to ``max_step`` times and returns
    ``step / (max_step - 1)`` for the first step at which |z| exceeds the
    escape bound; points that never diverge (members of the Mandelbrot set)
    therefore return 1.0.  ``max_step`` must be >= 2.

    The original had three parameters all named ``snake_case__`` (a
    SyntaxError) and bound every local to the same name while reading
    ``a``/``b``/``a_new``.

    >>> get_distance(0, 0, 50)
    1.0
    >>> get_distance(2, 0, 50)
    0.0
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        # One Mandelbrot iteration: z -> z^2 + c with z = a + bi, c = x + yi.
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    """Map a divergence distance to black (inside the set) or white.

    >>> get_black_and_white_rgb(1)
    (0, 0, 0)
    >>> get_black_and_white_rgb(0.5)
    (255, 255, 255)
    """
    if distance == 1:
        # distance == 1 means the point never diverged, i.e. it is in the set.
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Map a divergence distance to an RGB colour via the HSV colour wheel.

    Points inside the set (distance == 1) are black; all other distances are
    used as the hue of a fully saturated, fully bright HSV colour.

    >>> get_color_coded_rgb(1)
    (0, 0, 0)
    >>> get_color_coded_rgb(0)
    (255, 0, 0)
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> "Image.Image":
    """Render the Mandelbrot set and return it as a PIL image.

    The image maps the rectangle of width ``figure_width`` (height derived to
    keep the aspect ratio) centred on (figure_center_x, figure_center_y).
    The original had seven parameters all named ``snake_case__`` (a
    SyntaxError) and dropped the per-pixel writes entirely.
    """
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # figure_height is loop-invariant, so compute it once (hoisted out of the
    # per-pixel loop where the original recomputed it).
    figure_height = figure_width / image_width * image_height

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure (the original bound the image to a
    # throwaway name while `img.show()` read `img`)
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 311 | 1 |
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    """One node of a segment tree covering the index range [start, end].

    ``val`` holds the aggregated value of the range, ``mid`` the split point,
    and ``left``/``right`` the child nodes (None for leaves).  Restored from a
    mangled version that never assigned the attributes onto ``self`` and whose
    name no longer matched the ``SegmentTreeNode(...)`` call sites.
    """

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree:
    """Segment tree over ``collection`` aggregated with the binary ``function``.

    Supports point updates and range queries in O(log n).  Restored from a
    mangled version whose attributes were never stored on ``self`` and whose
    method names no longer matched their call sites.
    """

    def __init__(self, collection, function):
        self.collection = collection
        self.fn = function  # binary, associative aggregation (e.g. operator.add, max, min)
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set collection[i] to ``val`` and re-aggregate along the path to the root."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return fn-aggregate of collection[i..j] (both bounds inclusive)."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        # Leaf: the range is a single element.
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        # Recompute this node's aggregate from the (now updated) children.
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield all nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
    import operator

    # Demo: build the same tree under three aggregations, print it, mutate
    # one element, and run a few range queries.  (The original never bound
    # `arr`, so every subsequent line raised NameError.)
    for fn in [operator.add, max, min]:
        print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
| 318 |
'''simple docstring'''
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return the longest non-decreasing subsequence found by this pivot scan.

    Restored from a mangled version whose locals were all bound to one name
    while being read as ``array_length``/``pivot``/etc., and whose own
    recursive calls referenced ``longest_subsequence``.

    >>> longest_subsequence([1, 2, 3])
    [1, 2, 3]
    >>> longest_subsequence([])
    []
    """
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            # Found an element smaller than the pivot: try the best chain that
            # drops the prefix and starts from array[i] instead.
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    # Best chain that keeps the pivot as its first element.
    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 318 | 1 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 362 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    """Entry point of the `diffusers-cli` tool.

    Builds the argument parser, registers the available sub-commands, then
    dispatches to the command selected on the command line.  Restored from a
    mangled version whose locals were all bound to one name and whose name no
    longer matched the ``main()`` call below.
    """
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        # No sub-command given: show usage and bail out with a failure code.
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 349 | 0 |
from collections import defaultdict
class AssignmentUsingBitmask:
    """Count the ways to assign every person exactly one distinct task.

    Persons are identified by their index in ``task_performed``; a bitmask
    over persons tracks who has already been given a task.  Restored from a
    mangled version that never bound ``total_ways_util`` and appended the
    whole task list instead of the person index.
    """

    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        """Number of complete assignments reachable from (mask, task_no)."""
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # Number of ways when we don't this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        """Populate the task->persons index and run the DP from task 1, mask 0."""
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| 116 |
'''simple docstring'''
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: maximum sum over all contiguous subarrays of ``arr``.

    With ``allow_empty_subarrays=True`` the empty subarray (sum 0) is allowed,
    so all-negative input yields 0; otherwise at least one element must be
    taken.  An empty ``arr`` returns 0.  Restored from a version whose two
    parameters shared one name (a SyntaxError) and whose locals were mangled.

    >>> max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    6
    """
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart at the current element
        # (restart at 0, i.e. "empty", when empty subarrays are allowed).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()

    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
| 58 | 0 |
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def UpperCAmelCase__ (UpperCamelCase_ ):
    """Seed every relevant RNG (``random``, NumPy, PyTorch CPU and all CUDA devices) with the same value."""
    seeders = (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed_all,
    )
    for seed_fn in seeders:
        seed_fn(UpperCamelCase_)
    # ^^ torch.cuda.manual_seed_all is safe to call even if cuda is not available
class EMAModel:
    """Exponential Moving Average of model weights.

    Keeps a shadow copy of a parameter list and updates it towards the live
    parameters on each ``step()``.  Restored from a mangled version in which
    all eight methods shared one name (so only the last survived) while the
    bodies called the real names (``get_decay``, ``load_state_dict``, ...).
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        """
        Args:
            parameters: parameters to track (passing an ``nn.Module`` is deprecated).
            decay: maximum EMA decay factor.
            min_decay: minimum EMA decay factor.
            update_after_step: steps to wait before EMA updates begin.
            use_ema_warmup: ramp the decay up via ``(1 + step/inv_gamma) ** -power``.
            model_cls / model_config: needed for ``save_pretrained``/``from_pretrained``.
        """
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        """Load a model with ``model_cls`` and wrap it in a fresh EMA tracker."""
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        """Materialize the shadow weights into a fresh model and save it."""
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        """Move the shadow parameters towards ``parameters`` by the current decay."""
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                # Under ZeRO-3 the full parameter must be gathered before use.
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the shadow parameters into ``parameters`` in place."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move (and optionally cast) the shadow parameters; non-float tensors keep their dtype."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return the full EMA state (hyper-parameters + shadow parameters)."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Temporarily snapshot ``parameters`` (on CPU) so they can be ``restore``d later."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the previously ``store``d snapshot back into ``parameters``."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Validate and load an EMA state produced by ``state_dict()``."""
        # deepcopy so later mutation of the caller's dict cannot alias our state
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 352 |
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate a polynomial at ``x0`` using Neville's scheme.

    Returns ``[value, q]`` where ``value`` is the interpolated value and ``q``
    the full table of intermediate results.  Restored from a version whose
    three parameters shared one name (a SyntaxError) and whose locals were
    mangled.  Note the table seeds column 1 (not 0), which is the behaviour
    the rest of the routine is written against.

    >>> neville_interpolate([0, 1, 2], [0, 1, 2], 5)[0]
    5.0
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 213 | 0 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    """End-to-end mock test for `send_file`: wire up a fake socket/file pair,
    run one transfer, and verify every expected call happened exactly once.

    Note `@patch` decorators apply bottom-up, so the inner `builtins.open`
    mock is the first parameter.  Restored from a version whose two
    parameters shared one name (a SyntaxError) and whose mock wiring was
    bound to a throwaway local.
    """
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    # The fake file yields one chunk, then None to signal EOF.
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 105 |
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
# Module-level logger for this conversion script (the name appears to be a
# mangled form of `logger` — TODO confirm against the rest of the file).
_UpperCamelCase = logging.get_logger(__name__)
# Emit INFO-level progress messages during the checkpoint conversion.
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """
    Copy the weights of an original (fairseq-style) ProphetNet / XLM-ProphetNet
    checkpoint into the HuggingFace model structure and save the result.

    Args:
        prophetnet_checkpoint_path: path of the original checkpoint; paths
            containing "xprophetnet" are treated as XLM-ProphetNet.
        pytorch_dump_folder_path: output folder for the converted model.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True )
    # attention projections are stored fused (in_proj_*) in the old checkpoints
    special_keys = ['key_proj', 'value_proj', 'query_proj']
    # old attribute name for each new attribute name along the module path
    mapping = {
        'self_attn': 'ngram_self_attn',
        'cross_attn': 'encoder_attn',
        'cross_attn_layer_norm': 'encoder_attn_layer_norm',
        'feed_forward_layer_norm': 'final_layer_norm',
        'feed_forward': '',
        'intermediate': 'fc1',
        'output': 'fc2',
        'key_proj': 'k_proj',
        'query_proj': 'q_proj',
        'value_proj': 'v_proj',
        'word_embeddings': 'embed_tokens',
        'embeddings_layer_norm': 'emb_layer_norm',
        'relative_pos_embeddings': 'relative_linear',
        'ngram_embeddings': 'ngram_input_embed',
        'position_embeddings': 'embed_positions',
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split('.' )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        # walk down both module trees in parallel until a weight is copied
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            # NOTE(review): `old_attribute` may be unbound on the very first
            # attribute if it is not in `mapping` — mirrors the upstream script.
            if not hasattr(old_model, old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute ):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(F'{attribute} is initialized.' )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(F'{attribute} is initialized' )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, 'in_proj_weight' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute )
                # these were previously bare comparisons (no-ops); made real checks
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(attribute )]
            else:
                model = getattr(model, attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute ):
                        raise ValueError(F'{old_model} does not have {old_attribute}' )
                    old_model = getattr(old_model, old_attribute )
        if not is_key_init:
            raise ValueError(F'{key} was not correctly initialized!' )
    print(F'Saving model to {pytorch_dump_folder_path}' )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Command-line entry point for the ProphetNet checkpoint conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
# Pre-computed sum of squared digits for every number below 100000; lets the
# helper below consume five digits of its argument per iteration.
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]


def next_number(number: int) -> int:
    """
    Return the sum of the squares of the decimal digits of `number`.

    >>> next_number(44)
    32
    >>> next_number(10)
    1
    """
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


lowerCamelCase = next_number  # backward-compatible alias for the obfuscated name
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_00_00_00
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89


def chain(number: int) -> bool:
    """
    Return True when the squared-digit chain starting at `number` ends at 1,
    False when it ends at 89.  Results are memoised in CHAINS (index = number - 1).
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # number * 10**k has the same digit-square chain, so cache those too
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


lowerCamelCase = chain  # backward-compatible alias for the obfuscated name
def solution(number: int = 10_000_000) -> int:
    """
    Count how many starting numbers below `number` have squared-digit chains
    that arrive at 89 (Project Euler problem 92).
    """
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    # CHAINS entries are True for chains ending at 1, False for 89
    return CHAINS[:number].count(False)


lowerCamelCase = solution  # backward-compatible alias for the obfuscated name

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f'''{solution() = }''')
'''simple docstring'''
# Adjacency list of the demo DAG and the order in which vertices are scanned.
edges: dict[str, list[str]] = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices: list[str] = ['a', 'b', 'c', 'd', 'e']


def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    """
    Depth-first topological sort of the module-level DAG beginning at `start`.

    `visited` and `sort` are accumulator lists that are mutated in place; the
    completed ordering (reverse-postorder, covering every component) is returned.
    """
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


lowerCamelCase = topological_sort  # backward-compatible alias for the obfuscated name
if __name__ == "__main__":
    # Demo: sort the module-level graph starting from vertex "a".
    sort = topological_sort('a', [], [])
    print(sort)
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __A(PreTrainedModel, BackboneMixin):
    """
    Wrapper that exposes a `timm` feature-extraction model through the
    HuggingFace backbone interface (PreTrainedModel machinery + BackboneMixin).
    """

    # NOTE(review): the original file declared these three values under a single
    # shadowed attribute name; restored to the names PreTrainedModel reads.
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, """timm""" )
        super().__init__(config )
        self.config = config
        if config.backbone is None:
            raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
        if config.backbone not in timm.list_models():
            raise ValueError(F"backbone {config.backbone} is not supported by timm." )
        if hasattr(config, """out_features""" ) and config.out_features is not None:
            raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
        # default of None (not the config itself) so a missing attribute is detected
        pretrained = getattr(config, """use_pretrained_backbone""", None )
        if pretrained is None:
            raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, """out_indices""", None ) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=config.num_channels, out_indices=out_indices, **kwargs, )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["""module"""]: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
        super()._init_backbone(config )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Build a TimmBackboneConfig for the requested timm model and instantiate from it."""
        requires_backends(cls, ["""vision""", """timm"""] )
        from ...models.timm_backbone import TimmBackboneConfig
        config = kwargs.pop("""config""", TimmBackboneConfig() )
        use_timm = kwargs.pop("""use_timm_backbone""", True )
        if not use_timm:
            raise ValueError("""use_timm_backbone must be True for timm backbones""" )
        num_channels = kwargs.pop("""num_channels""", config.num_channels )
        features_only = kwargs.pop("""features_only""", config.features_only )
        use_pretrained_backbone = kwargs.pop("""use_pretrained_backbone""", config.use_pretrained_backbone )
        out_indices = kwargs.pop("""out_indices""", config.out_indices )
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, )
        return super()._from_config(config, **kwargs )

    def _init_weights(self, module):
        # Empty init-weights hook: timm already initialises its own modules.
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        """Run the timm backbone and package the outputs as a BackboneOutput."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs )
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices )
        else:
            feature_maps = self._backbone(pixel_values, **kwargs )
            hidden_states = None
        feature_maps = tuple(feature_maps )
        hidden_states = tuple(hidden_states ) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
# NOTE(review): the checkpoint map below reuses the logger's name and therefore
# shadows it immediately — confirm the intended distinct names upstream.
# Map of canonical VisualBERT checkpoints to their hosted config files.
lowerCamelCase_ = {
    '''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
    '''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-vqa-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
    ),
    '''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
    '''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-vcr-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
    ),
    '''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
    '''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-nlvr2-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
    )
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class __A(PretrainedConfig):
    """
    Configuration for VisualBERT models: BERT text settings plus the dimension
    of the visual embeddings and VisualBERT-specific flags.
    """

    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        visual_embedding_dim=5_12,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
def solution(n: int = 1_00) -> int:
    """
    Return the difference between the square of the sum and the sum of the
    squares of the first `n` natural numbers (Project Euler problem 6).

    Integer floor division is exact here (both quantities are whole numbers),
    so no float rounding can creep in for large `n`.

    >>> solution(10)
    2640
    """
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares


snake_case = solution  # backward-compatible alias for the obfuscated name
if __name__ == "__main__":
    print(f'''{solution() = }''')
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
# Map of canonical MaskFormer checkpoints to their hosted config files.
lowerCAmelCase_ = {
    'facebook/maskformer-swin-base-ade': (
        'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCAmelCase_ = logging.get_logger(__name__)  # NOTE(review): shadows the map above (pre-existing)
logger = lowerCAmelCase_  # the config class below logs via `logger`; bind it explicitly
class _A(PretrainedConfig):
    """
    Configuration for MaskFormer: a Swin-style backbone plus a DETR-style
    transformer decoder, with the Hungarian-matcher loss weights.
    """

    model_type = '''maskformer'''
    attribute_map = {'''hidden_size''': '''mask_feature_size'''}
    backbones_supported = ['''resnet''', '''swin''']
    decoders_supported = ['''detr''']

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ) -> None:
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384, in_channels=3, patch_size=4, embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12, drop_path_rate=0.3, out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''], )
        if isinstance(backbone_config, dict ):
            backbone_model_type = backbone_config.pop('''model_type''' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
                f"""Supported model types: {','.join(self.backbones_supported )}""" )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop('''model_type''' ) if isinstance(decoder_config, dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"""Transformer Decoder {decoder_type} not supported, please use one of"""
                    f""" {','.join(self.decoders_supported )}""" )
            if isinstance(decoder_config, dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )

    @classmethod
    def from_backbone_and_decoder_configs(cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs ):
        """Instantiate a MaskFormer config from explicit backbone and decoder configs."""
        return cls(
            backbone_config=backbone_config, decoder_config=decoder_config, **kwargs, )

    def to_dict(self ) -> Dict[str, any]:
        """Serialise to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__ )
        output['''backbone_config'''] = self.backbone_config.to_dict()
        output['''decoder_config'''] = self.decoder_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
import os
def solution(filename: str = "input.txt") -> int:
    """
    Return the minimal path sum from the left column to the right column of the
    comma-separated matrix stored in `filename` (located next to this script),
    moving up, down, or right (Project Euler problem 82).
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(''',''')]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    # first column: a path can start on any row
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        # enter column j from the left
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # relax downward moves
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j])
        # relax upward moves
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j])
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


a__ = solution  # backward-compatible alias for the obfuscated name
if __name__ == "__main__":
    # Print the minimal path sum for the bundled input file.
    print(f"""{solution() = }""")
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    """
    Build the list of (old_name, new_name) pairs that map original DiT/BEiT
    checkpoint keys to the HuggingFace BEiT naming scheme.

    Args:
        config: model config; only `num_hidden_layers` is read here.
        has_lm_head: keep the masked-image-modeling head instead of a classifier.
        is_semantic: semantic-segmentation checkpoints prefix keys with "backbone.".
    """
    prefix = '''backbone.''' if is_semantic else ''''''
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias'''))
        rename_keys.append(
            (f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight'''))
        rename_keys.append(
            (f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias'''))
        rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight'''))
        rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias'''))
    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f'''{prefix}cls_token''', '''beit.embeddings.cls_token'''),
            (f'''{prefix}patch_embed.proj.weight''', '''beit.embeddings.patch_embeddings.projection.weight'''),
            (f'''{prefix}patch_embed.proj.bias''', '''beit.embeddings.patch_embeddings.projection.bias'''),
            (f'''{prefix}pos_embed''', '''beit.embeddings.position_embeddings'''),
        ])
    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ('''mask_token''', '''beit.embeddings.mask_token'''),
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
            ])
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
                ('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ])
    return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    """
    Split each fused timm qkv projection into separate query/key/value entries
    under the HuggingFace BEiT naming scheme and move the layer-scale gammas.
    Mutates `state_dict` in place.
    """
    for i in range(config.num_hidden_layers):
        prefix = '''backbone.''' if is_semantic else ''''''
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''')
        q_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''')
        v_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''')
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''')
        gamma_b = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''')
        state_dict[f'''beit.encoder.layer.{i}.lambda_1'''] = gamma_a
        state_dict[f'''beit.encoder.layer.{i}.lambda_2'''] = gamma_b
def rename_key(dct, old, new):
    """Move the entry stored under `old` to key `new`, mutating `dct` in place."""
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Download the standard COCO validation image (two cats) used to sanity-check vision models."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original DiT weights into the HuggingFace BEiT
    structure, verify the logits shape on a sample image, and save (and
    optionally push) the converted model and image processor.
    """
    # RVL-CDIP checkpoints are fine-tuned classifiers; the rest keep the MIM head
    has_lm_head = False if '''rvlcdip''' in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = '''huggingface/label-files'''
        filename = '''rvlcdip-id2label.json'''
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')['''model''']
    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()
    encoding = image_processor(images=image, return_tensors='''pt''')
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values)
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8_192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        if has_lm_head:
            model_name = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
        else:
            model_name = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='''nielsr''', commit_message='''Add image processor''', use_temp_dir=True, )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='''nielsr''', commit_message='''Add model''', use_temp_dir=True, )
if __name__ == "__main__":
    # Command-line entry point for the DiT -> BEiT checkpoint conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__)
# NOTE(review): the checkpoint map below reuses the logger's name and therefore
# shadows it immediately — confirm the intended distinct names upstream.
# Map of canonical RWKV checkpoints to their hosted config files.
SCREAMING_SNAKE_CASE__:Tuple = {
    """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
    """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
    """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
    """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
    """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
    """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class snake_case__ ( PretrainedConfig ):
    """
    Configuration for RWKV models: vocabulary/context sizes, layer geometry,
    and the layer-rescaling interval used at inference time.
    """

    model_type = """rwkv"""
    attribute_map = {"""max_position_embeddings""": """context_length"""}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1E-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # both fall back to sizes derived from hidden_size when unset
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
import math
def _lowerCamelCase( a ):
__a = []
__a = 2
__a = int(math.sqrt(a ) ) # Size of every segment
__a = [True] * (end + 1)
__a = []
while start <= end:
if temp[start] is True:
in_prime.append(a )
for i in range(start * start , end + 1 , a ):
__a = False
start += 1
prime += in_prime
__a = end + 1
__a = min(2 * end , a )
while low <= n:
__a = [True] * (high - low + 1)
for each in in_prime:
__a = math.floor(low / each ) * each
if t < low:
t += each
for j in range(a , high + 1 , a ):
__a = False
for j in range(len(a ) ):
if temp[j] is True:
prime.append(j + low )
__a = high + 1
__a = min(high + end , a )
return prime
print(sieve(10**6))
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase :Optional[Any] = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str):
    """Build the YolosConfig matching a named original checkpoint variant.

    Architecture hyper-parameters depend on the variant name; the COCO
    detection label maps are fetched from the HF hub in every case.
    """
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    # COCO object detection has 91 classes; id/label maps live on the hub.
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    return config
def read_in_q_k_v(state_dict: dict, config, base_model: bool = False):
    """Split timm's fused qkv projections into separate q/k/v entries.

    Mutates `state_dict` in place: pops each `blocks.{i}.attn.qkv.*` tensor
    and stores the q/k/v slices under HF `vit.encoder.layer.{i}` keys.
    `config` only needs `num_hidden_layers` and `hidden_size`.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(name: str) -> str:
    """Translate one original (timm-style) parameter name to HF YOLOS naming.

    Substitutions are applied in order, so earlier rewrites (e.g. "blocks" ->
    "encoder.layer") feed into later checks.
    """
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
def convert_state_dict(orig_state_dict: dict, model):
    """Rewrite every key of the original checkpoint to HF YOLOS naming.

    Fused qkv tensors are split into query/key/value slices sized by the
    layer's `all_head_size`; all other keys go through `rename_key`.
    Mutates and returns `orig_state_dict`.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # Keys look like "backbone.blocks.<layer>.attn.qkv.{weight,bias}".
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def prepare_img():
    """Download the standard COCO cats image used for output verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(
    yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    """Convert an original YOLOS checkpoint to HF format.

    Loads the original weights, remaps them onto `YolosForObjectDetection`,
    verifies logits/boxes on a reference image against per-variant expected
    slices, saves model + image processor, and optionally pushes to the hub.
    """
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    # Regression slices recorded from the original repository, per variant.
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Map original variant names onto hub repository names.
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--yolos_name",
        default="yolos_s_200_pre",
        type=str,
        help=(
            "Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
            " 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 263 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_lowerCAmelCase :str = object()
# For specifying empty leaf dict `{}`
_lowerCAmelCase :str = object()
def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : int ):
_UpperCAmelCase : Dict = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ):
_UpperCAmelCase : str = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )]
if matches and all(UpperCamelCase__ ):
return True
return False
def lowerCamelCase_ (UpperCamelCase__ : List[str] ):
def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ):
for rule, replacement in rules:
if _match(UpperCamelCase__ , UpperCamelCase__ ):
return replacement
return val
return replace
def lowerCamelCase_ ():
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def set_partitions(in_dict):
    """Assign a PartitionSpec to every leaf of a (nested) parameter dict.

    Flattens `in_dict`, maps each flat key through the partition rules, and
    asserts that every leaf matched some rule before freezing the result.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Seed every leaf with the sentinel so unmatched keys are detectable.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 263 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
): #
return True
return False
def is_chinese(word: str):
    """Return 1 if every character of `word` is a CJK ideograph, else 0."""
    # word like '180' or '身高' or '神'
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1
def get_chinese_word(tokens):
    """Collect the distinct multi-character Chinese words from `tokens`."""
    word_set = set()
    for token in tokens:
        # Single characters are not "words"; keep only length>1 all-CJK tokens.
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    """Prefix '##' onto BERT tokens that are non-initial parts of a Chinese word.

    Scans `bert_tokens` left-to-right; when a window of tokens joins into a
    word from `chinese_word_set` (longest match first), every token after the
    first in that window gets the WordPiece continuation marker '##'.
    Mutates and returns `bert_tokens`.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Try the longest possible word first, down to length 2.
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """Compute whole-word-masking reference positions for each input line.

    For every line, LTP segments the text into Chinese words and BERT
    tokenizes it; the returned list holds, per line, the indices of BERT
    tokens that are non-initial pieces of a Chinese word.
    """
    # LTP word segmentation, batched by 100 lines to bound memory.
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    # BERT tokenization, same batching.
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """Read raw Chinese text, compute whole-word-masking ref ids, save as JSON lines."""
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )

    args = parser.parse_args()
    main(args)
| 117 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_A = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    After init, `data_files` holds a {"train": ..., "val": ...} mapping built
    from `train_dir`/`validation_dir`, or None when neither is given.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Collect explicitly provided data folders into the `data_files`
        # mapping expected by `datasets.load_dataset`.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/image processor we are going to pre-train."""

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )
@dataclass
class CustomTrainingArguments(TrainingArguments):
    """TrainingArguments extended with a base learning rate that `main` scales
    by the effective batch size (absolute_lr = base_lr * total_batch_size / 256)."""

    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )
def collate_fn(examples):
    """Stack per-example pixel tensors into one batch tensor for the Trainer."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}
def main():
    """Pre-train a ViT-MAE model: parse args, load data, build model, train/eval."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying the train transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs): each spawned process just runs main().
    main()


if __name__ == "__main__":
    main()
| 117 | 1 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """CONNECTION_TIMES_OUT simulation: a request without an explicit timeout
    surfaces as RequestWouldHangIndefinitelyError; with a timeout it raises
    requests' ConnectTimeout."""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    """CONNECTION_FAILS simulation: any request must raise requests'
    ConnectionError immediately."""
    simulated_mode = OfflineSimulationMode.CONNECTION_FAILS
    with offline(simulated_mode):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode():
    """HF_DATASETS_OFFLINE=1 simulation: http_head must raise ConnectionError
    without attempting a real network request."""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 89 |
'''Deprecation shim: re-exports StableDiffusionInpaintPipeline for old imports.'''
import warnings

# Kept for backward compatibility of `from inpainting import ...` users.
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401

warnings.warn(
    '''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
    ''' StableDiffusionInpaintPipeline` instead.'''
)
from __future__ import annotations
from collections.abc import Iterator
class Node:
    """A binary-tree node holding an integer payload."""

    def __init__(self, value: int) -> None:
        self.value = value
        # Children start empty; callers link subtrees after construction.
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    """Sum all node values of a binary tree via depth-first traversal.

    Iterating an instance yields the single total, so `sum(instance)` or
    `list(instance)[0]` gives the tree's node-value sum.
    """

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        # Recursive DFS: an empty subtree contributes 0 to the sum.
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 333 | import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCamelCase = logging.getLogger(__name__)
class snake_case_ ( __A ):
    """PyTorch-Lightning module fine-tuning a transformer for token
    classification (NER, POS, ...).

    NOTE(review): this block is obfuscation-damaged and cannot run as-is:
    the base name ``__A`` is undefined (presumably ``BaseTransformer``,
    imported above — confirm); several method signatures repeat the
    parameter name ``lowercase_`` (a SyntaxError); and results are bound to
    throwaway locals (``lowercase__``) while later statements read
    ``hparams``/``module``/``batch``/``outputs``/``features``/``self.*``
    names that are never defined. Needs restoration before use.
    """

    # Task mode forwarded to the base transformer (selects the model head).
    __A : int = "token-classification"

    def __init__( self : Tuple , lowercase_ : Dict ) -> List[str]:
        """Resolve the TokenClassificationTask subclass named by the
        hyperparameters, load the label list, and initialize the base
        transformer for token classification."""
        # Accept either a plain dict or an argparse Namespace of hyperparams.
        if type(lowercase_ ) == dict:
            lowercase__ : Dict = Namespace(**lowercase_ )
        # The concrete task (e.g. NER) is looked up by name in the local
        # ``tasks`` module.
        lowercase__ : str = import_module("tasks" )
        try:
            lowercase__ : Tuple = getattr(lowercase_ , hparams.task_type )
            lowercase__ : TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                F'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
                F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
        lowercase__ : Optional[Any] = self.token_classification_task.get_labels(hparams.labels )
        # Label id used to mask padding positions out of loss and metrics.
        lowercase__ : int = CrossEntropyLoss().ignore_index
        super().__init__(lowercase_ , len(self.labels ) , self.mode )

    def __UpperCamelCase ( self : Union[str, Any] , **lowercase_ : List[str] ) -> Any:
        """Forward pass: delegate directly to the underlying HF model."""
        return self.model(**lowercase_ )

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] ) -> Tuple:
        """Training step: assemble model inputs from the batch, return the loss."""
        lowercase__ : int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            lowercase__ : Tuple = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don"t use token_type_ids
        lowercase__ : Optional[int] = self(**lowercase_ )
        # First element of the model output is the loss.
        lowercase__ : Union[str, Any] = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
        """Build (or load from cache) tokenized feature files for each split."""
        lowercase__ : Tuple = self.hparams
        for mode in ["train", "dev", "test"]:
            lowercase__ : Any = self._feature_file(lowercase_ )
            if os.path.exists(lowercase_ ) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s" , lowercase_ )
                lowercase__ : str = torch.load(lowercase_ )
            else:
                logger.info("Creating features from dataset file at %s" , args.data_dir )
                lowercase__ : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , lowercase_ )
                lowercase__ : Dict = self.token_classification_task.convert_examples_to_features(
                    lowercase_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=lowercase_ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info("Saving features into cached file %s" , lowercase_ )
                torch.save(lowercase_ , lowercase_ )

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : bool = False ) -> DataLoader:
        """Load a cached feature file and wrap it in a DataLoader."""
        lowercase__ : str = self._feature_file(lowercase_ )
        logger.info("Loading features from cached file %s" , lowercase_ )
        lowercase__ : str = torch.load(lowercase_ )
        lowercase__ : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        lowercase__ : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            lowercase__ : Dict = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            lowercase__ : Dict = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK(we will not use this anymore soon)
        lowercase__ : List[str] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(lowercase_ , lowercase_ , lowercase_ , lowercase_ ) , batch_size=lowercase_ )

    def __UpperCamelCase ( self : str , lowercase_ : Dict , lowercase_ : Tuple ) -> str:
        """Validation step: run the model and collect loss, logits and gold ids."""
        lowercase__ : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            lowercase__ : int = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            ) # XLM and RoBERTa don"t use token_type_ids
        lowercase__ : List[Any] = self(**lowercase_ )
        lowercase__ , lowercase__ : Any = outputs[:2]
        lowercase__ : Optional[Any] = logits.detach().cpu().numpy()
        lowercase__ : int = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def __UpperCamelCase ( self : Optional[int] , lowercase_ : Any ) -> List[Any]:
        """Aggregate per-step outputs into seqeval metrics plus the aligned
        predicted/gold label-string lists (padding positions dropped)."""
        lowercase__ : int = torch.stack([x["val_loss"] for x in outputs] ).mean()
        lowercase__ : Any = np.concatenate([x["pred"] for x in outputs] , axis=0 )
        lowercase__ : Dict = np.argmax(lowercase_ , axis=2 )
        lowercase__ : int = np.concatenate([x["target"] for x in outputs] , axis=0 )
        lowercase__ : Any = dict(enumerate(self.labels ) )
        lowercase__ : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
        lowercase__ : Dict = [[] for _ in range(out_label_ids.shape[0] )]
        # Map ids back to label strings, skipping padding positions.
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        lowercase__ : Any = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(lowercase_ , lowercase_ ),
            "precision": precision_score(lowercase_ , lowercase_ ),
            "recall": recall_score(lowercase_ , lowercase_ ),
            "f1": fa_score(lowercase_ , lowercase_ ),
        }
        lowercase__ : List[Any] = dict(results.items() )
        lowercase__ : List[str] = results
        return ret, preds_list, out_label_list

    def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Dict:
        """Validation epoch end: expose the aggregated metrics for logging."""
        # when stable
        lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
        lowercase__ : Any = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def __UpperCamelCase ( self : str , lowercase_ : Tuple ) -> int:
        """Test epoch end: same aggregation, reported under test-loss keys."""
        # updating to test_epoch_end instead of deprecated test_end
        lowercase__ , lowercase__ , lowercase__ : Dict = self._eval_end(lowercase_ )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        lowercase__ : Optional[int] = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def __UpperCamelCase ( lowercase_ : int , lowercase_ : Union[str, Any] ) -> Tuple:
        """Extend the CLI argument parser with NER-specific options."""
        # Add NER specific options
        BaseTransformer.add_model_specific_args(lowercase_ , lowercase_ )
        parser.add_argument(
            "--task_type" , default="NER" , type=lowercase_ , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
        parser.add_argument(
            "--max_seq_length" , default=1_28 , type=lowercase_ , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--labels" , default="" , type=lowercase_ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
        parser.add_argument(
            "--gpus" , default=0 , type=lowercase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
        parser.add_argument(
            "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
        return parser
if __name__ == "__main__":
    # Build the CLI: generic lightning args + NER-specific args, then train.
    # Bug fix: every binding below was previously assigned to the single name
    # ``UpperCamelCase`` while later statements read ``parser``/``args``/
    # ``model``/``trainer``/``checkpoints`` (all undefined); the module also
    # referenced ``NERTransformer``, but the class defined in this file is
    # named ``snake_case_``.
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = snake_case_.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = snake_case_(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
| 333 | 1 |
'''simple docstring'''
import math
import qiskit
def __UpperCAmelCase ( input_1: int = 1, input_2: int = 1, carry_in: int = 1 ):
    """Build and simulate a quantum full adder for three input bits.

    Each input may be 0, 1, or 2 (2 means "put the qubit in superposition
    via a Hadamard gate"). Returns the measurement counts of the (sum,
    carry-out) qubits from 1000 simulator shots.

    Raises:
        TypeError: if any input is a string.
        ValueError: if any input is negative, non-integral, or greater than 2.

    Bug fixes: the signature previously declared the same parameter name
    ``a_`` three times (a SyntaxError), the type check compared each input
    to itself (``isinstance(a_, a_)``), and the body read undefined names
    (``input_a``, ``entry``, register/circuit/job locals). Restored per the
    standard quantum-full-adder algorithm.
    """
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)


# Public, un-mangled alias; the __main__ block below calls this name.
quantum_full_adder = __UpperCAmelCase
if __name__ == "__main__":
    # Bug fix: this module defines the adder as ``__UpperCAmelCase``; the
    # previous call to ``quantum_full_adder`` raised NameError. Call the
    # function by its actual (module-level, unmangled) name.
    print(f'Total sum count for state is: {__UpperCAmelCase(1, 1, 1)}')
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__a = logging.get_logger(__name__)
class A__ ( OwlViTImageProcessor ):
    """Deprecated alias for OwlViTImageProcessor, kept for backward
    compatibility. Instantiating it warns and defers to the new class."""

    def __init__( self , *args , **kwargs ) -> None:
        """Warn about the rename, then forward everything to
        OwlViTImageProcessor.__init__.

        Bug fixes: ``*args`` and ``**kwargs`` previously shared one parameter
        name (a SyntaxError); the warning category was the undefined name
        ``lowerCAmelCase__`` (now FutureWarning, the standard category for
        deprecations surfaced to end users); and the base class name
        ``UpperCamelCase`` was undefined (now the imported
        OwlViTImageProcessor).
        """
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __magic_name__ ( __lowercase , unittest.TestCase):
    """Fast unit tests for KandinskyInpaintPipeline built from tiny random
    components.

    NOTE(review): obfuscation damage — the mixin base ``__lowercase`` is
    undefined (presumably PipelineTesterMixin, imported above; confirm);
    every class attribute below is bound to the same name ``A`` (so only the
    last survives); all properties share the name ``UpperCAmelCase__``
    (mutual shadowing); ``get_dummy_inputs`` declares the parameter name
    ``lowerCamelCase__`` twice (a SyntaxError) while its body reads the
    undefined name ``snake_case_``; and many results are bound to throwaway
    ``UpperCamelCase__`` locals later read under other names. Needs
    restoration before it can run.
    """

    A: Optional[int] = KandinskyInpaintPipeline
    A: Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    A: Union[str, Any] = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    A: Optional[int] = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    A: Any = False

    @property
    def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
        """Hidden size of the tiny text embedder."""
        return 32

    @property
    def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
        """Time-embedding input dimension."""
        return 32

    @property
    def UpperCAmelCase__ ( self : Tuple ) -> int:
        """Cross-attention dimension (same as the time input dim)."""
        return self.time_input_dim

    @property
    def UpperCAmelCase__ ( self : Dict ) -> Any:
        """Time-projection dimension (4x the time input dim)."""
        return self.time_input_dim * 4

    @property
    def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
        """Number of scheduler timesteps used by the dummy setup."""
        return 100

    @property
    def UpperCAmelCase__ ( self : Tuple ) -> str:
        """Tiny random multilingual tokenizer for the text encoder."""
        UpperCamelCase__ : Tuple = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
        return tokenizer

    @property
    def UpperCAmelCase__ ( self : int ) -> Dict:
        """Tiny random MultilingualCLIP text encoder (seeded for determinism)."""
        torch.manual_seed(0 )
        UpperCamelCase__ : Union[str, Any] = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
        UpperCamelCase__ : Optional[Any] = MultilingualCLIP(snake_case_ )
        UpperCamelCase__ : str = text_encoder.eval()
        return text_encoder

    @property
    def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
        """Tiny conditional UNet; 9 input channels = 4 latent + 4 masked-image
        latent + 1 mask channel for inpainting."""
        torch.manual_seed(0 )
        UpperCamelCase__ : Union[str, Any] = {
            '''in_channels''': 9,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''text_image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''text_image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        UpperCamelCase__ : List[str] = UNetaDConditionModel(**snake_case_ )
        return model

    @property
    def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
        """Keyword arguments for the tiny VQ image decoder (movq)."""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
        """Tiny seeded VQModel used as the movq image decoder."""
        torch.manual_seed(0 )
        UpperCamelCase__ : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
        return model

    def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
        """Assemble the full component dict expected by the pipeline class."""
        UpperCamelCase__ : int = self.dummy_text_encoder
        UpperCamelCase__ : Dict = self.dummy_tokenizer
        UpperCamelCase__ : Dict = self.dummy_unet
        UpperCamelCase__ : Optional[int] = self.dummy_movq
        UpperCamelCase__ : str = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=snake_case_ , )
        UpperCamelCase__ : Optional[int] = {
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components

    def UpperCAmelCase__ ( self : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any=0 ) -> Any:
        """Build deterministic dummy call kwargs (embeds, init image, mask,
        generator) for one pipeline invocation."""
        UpperCamelCase__ : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
        UpperCamelCase__ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(snake_case_ )
        # create init_image
        UpperCamelCase__ : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
        UpperCamelCase__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase__ : Optional[int] = Image.fromarray(np.uinta(snake_case_ ) ).convert('''RGB''' ).resize((256, 256) )
        # create mask
        UpperCamelCase__ : str = np.ones((64, 64) , dtype=np.floataa )
        UpperCamelCase__ : Dict = 0
        if str(snake_case_ ).startswith('''mps''' ):
            UpperCamelCase__ : int = torch.manual_seed(snake_case_ )
        else:
            UpperCamelCase__ : Optional[int] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
        UpperCamelCase__ : List[Any] = {
            '''prompt''': '''horse''',
            '''image''': init_image,
            '''mask_image''': mask,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 4.0,
            '''output_type''': '''np''',
        }
        return inputs

    def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]:
        """Smoke-test inpainting on CPU: output shape and a reference slice."""
        UpperCamelCase__ : Optional[int] = '''cpu'''
        UpperCamelCase__ : List[Any] = self.get_dummy_components()
        UpperCamelCase__ : Tuple = self.pipeline_class(**snake_case_ )
        UpperCamelCase__ : Any = pipe.to(snake_case_ )
        pipe.set_progress_bar_config(disable=snake_case_ )
        UpperCamelCase__ : Optional[Any] = pipe(**self.get_dummy_inputs(snake_case_ ) )
        UpperCamelCase__ : Union[str, Any] = output.images
        UpperCamelCase__ : Dict = pipe(
            **self.get_dummy_inputs(snake_case_ ) , return_dict=snake_case_ , )[0]
        UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
        UpperCamelCase__ : Tuple = image_from_tuple[0, -3:, -3:, -1]
        print(F"image.shape {image.shape}" )
        assert image.shape == (1, 64, 64, 3)
        UpperCamelCase__ : Union[str, Any] = np.array(
            [0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def UpperCAmelCase__ ( self : List[Any] ) -> int:
        """Batched single-image inference must match unbatched output."""
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
    """Slow GPU integration test: full-size Kandinsky prior + inpaint run
    compared against a hosted reference image.

    NOTE(review): obfuscation damage — results are bound to throwaway
    ``UpperCamelCase__`` locals while later statements read names like
    ``init_image``/``prompt``/``pipeline``/``output`` that are never
    defined. Needs restoration before it can run.
    """

    def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
        """End-to-end run: prior produces image embeds, inpaint pipeline
        paints a hat on the cat image; compares mean pixel difference."""
        UpperCamelCase__ : Union[str, Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
        UpperCamelCase__ : Optional[Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        UpperCamelCase__ : Union[str, Any] = np.ones((768, 768) , dtype=np.floataa )
        UpperCamelCase__ : Union[str, Any] = 0
        UpperCamelCase__ : Tuple = '''a hat'''
        UpperCamelCase__ : int = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
        pipe_prior.to(snake_case_ )
        UpperCamelCase__ : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
        UpperCamelCase__ : Any = pipeline.to(snake_case_ )
        pipeline.set_progress_bar_config(disable=snake_case_ )
        UpperCamelCase__ : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
        UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = pipe_prior(
            snake_case_ , generator=snake_case_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        UpperCamelCase__ : List[str] = pipeline(
            snake_case_ , image=snake_case_ , mask_image=snake_case_ , image_embeds=snake_case_ , negative_image_embeds=snake_case_ , generator=snake_case_ , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
        UpperCamelCase__ : int = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(snake_case_ , snake_case_ )
| 351 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __magic_name__ ( __lowerCAmelCase):
    """Processor bundling a FLAVA image processor with a BERT tokenizer so
    text and/or images can be prepared in one call.

    NOTE(review): obfuscation damage — the base name ``__lowerCAmelCase`` is
    undefined (presumably ProcessorMixin, imported above; confirm); the
    ``__init__`` and ``__call__`` signatures repeat the parameter name
    ``lowerCamelCase__`` many times (a SyntaxError); and bodies read names
    (``text``, ``images``, padding/truncation flags) the damaged signatures
    no longer provide. Needs restoration before it can run.
    """

    A: Optional[int] = ["image_processor", "tokenizer"]
    A: int = "FlavaImageProcessor"
    A: List[str] = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self : int , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : int ) -> int:
        """Accept an image processor and tokenizer; honor the deprecated
        ``feature_extractor`` kwarg as a fallback image processor."""
        UpperCamelCase__ : Optional[Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , lowerCamelCase__ , )
            UpperCamelCase__ : Union[str, Any] = kwargs.pop('''feature_extractor''' )
        UpperCamelCase__ : str = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(lowerCamelCase__ , lowerCamelCase__ )
        UpperCamelCase__ : List[str] = self.image_processor

    def __call__( self : int , lowerCamelCase__ : Optional[ImageInput] = None , lowerCamelCase__ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = False , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : List[str] , ) -> Any:
        """Tokenize ``text`` and/or preprocess ``images``; when both are
        given, merge image features into the text encoding."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            UpperCamelCase__ : Dict = self.tokenizer(
                text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
        if images is not None:
            UpperCamelCase__ : Optional[int] = self.image_processor(
                lowerCamelCase__ , return_image_mask=lowerCamelCase__ , return_codebook_pixels=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
        if text is not None and images is not None:
            encoding.update(lowerCamelCase__ )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**lowerCamelCase__ ) , tensor_type=lowerCamelCase__ )

    def UpperCAmelCase__ ( self : Optional[Any] , *lowerCamelCase__ : str , **lowerCamelCase__ : int ) -> Optional[Any]:
        """Delegate batch decoding to the tokenizer."""
        return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )

    def UpperCAmelCase__ ( self : Dict , *lowerCamelCase__ : Dict , **lowerCamelCase__ : int ) -> List[str]:
        """Delegate single-sequence decoding to the tokenizer."""
        return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )

    @property
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
        """Union of tokenizer and image-processor model input names."""
        UpperCamelCase__ : Optional[Any] = self.tokenizer.model_input_names
        UpperCamelCase__ : Union[str, Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def UpperCAmelCase__ ( self : Tuple ) -> Dict:
        """Deprecated: use ``image_processor_class`` instead."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCamelCase__ , )
        return self.image_processor_class

    @property
    def UpperCAmelCase__ ( self : List[Any] ) -> Any:
        """Deprecated: use ``image_processor`` instead."""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCamelCase__ , )
        return self.image_processor
| 51 | 0 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger used by the config classes below for mismatch warnings.
# Bug fix: both statements here previously bound the same name
# ``UpperCAmelCase`` — the logger was immediately overwritten by the dict,
# and the name ``logger`` read below was undefined.
logger = logging.get_logger(__name__)

# Map of released OwlViT checkpoints to their hosted config files.
UpperCAmelCase : Any = {
    """google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
    """google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
    """google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class OwlViTTextConfig(PretrainedConfig):
    """Configuration for the OwlViT text encoder.

    Restored from obfuscation damage: the base name ``a_`` was undefined
    (now the imported PretrainedConfig), the ``model_type`` attribute was
    bound to ``lowercase__`` (so the ``cls.model_type`` checks below read
    the wrong attribute), every ``__init__`` parameter shared the name
    ``snake_case__`` (a SyntaxError), and the body bound throwaway locals
    instead of setting ``self.*`` attributes.
    """

    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=4_9408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=4_9406,
        eos_token_id=4_9407,
        **kwargs,
    ):
        """Store the text-encoder hyperparameters; special-token ids are
        forwarded to PretrainedConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config from a checkpoint, unwrapping the nested
        ``text_config`` when the checkpoint holds a composite owlvit config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)


# Backward-compatible alias for the obfuscated class name.
_lowercase = OwlViTTextConfig
class OwlViTVisionConfig(PretrainedConfig):
    """Configuration for the OwlViT vision encoder.

    Restored from obfuscation damage: undefined base ``a_`` (now the
    imported PretrainedConfig), lost ``model_type`` attribute, duplicate
    ``snake_case__`` parameters (a SyntaxError), and locals bound instead of
    ``self.*`` attributes.
    """

    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        """Store the vision-encoder hyperparameters."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config from a checkpoint, unwrapping the nested
        ``vision_config`` when the checkpoint holds a composite owlvit config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)


# Backward-compatible alias for the obfuscated class name.
_lowercase = OwlViTVisionConfig
class OwlViTConfig(PretrainedConfig):
    """Composite OwlViT configuration combining text and vision sub-configs.

    Restored from obfuscation damage: undefined base ``a_`` (now the
    imported PretrainedConfig), both class attributes bound to the same
    ``lowercase__`` name, duplicate ``snake_case__`` parameters (a
    SyntaxError), and three methods all named ``_lowerCamelCase`` so only
    the last survived — restored to their conventional names, which the
    bodies themselves require (e.g. ``cls.model_type``, ``to_dict`` on the
    sub-configs).
    """

    model_type = "owlvit"
    # NOTE(review): the second duplicated attribute (``True``) most
    # plausibly was ``is_composition`` — confirm against upstream.
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        """Build text/vision sub-configs (defaulting each to its own
        defaults when omitted) and store projection/logit-scale settings."""
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")
        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the composite config from a checkpoint, warning on a
        model-type mismatch."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Build a composite config from text and vision config dicts."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# Backward-compatible alias for the obfuscated class name.
_lowercase = OwlViTConfig
class OwlViTOnnxConfig(OnnxConfig):
    """ONNX export configuration for OwlViT (text + image dual inputs).

    Restored from obfuscation damage: undefined base ``a_`` (now the
    imported OnnxConfig) and five members all named ``_lowerCamelCase``
    (mutual shadowing) — restored to the standard OnnxConfig member names
    the export machinery looks up.
    """

    @property
    def inputs(self):
        """Dynamic-axis spec for the exported model's inputs."""
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ])

    @property
    def outputs(self):
        """Dynamic-axis spec for the exported model's outputs."""
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ])

    @property
    def atol_for_validation(self):
        """Absolute tolerance when validating ONNX vs PyTorch outputs."""
        return 1e-4

    def generate_dummy_inputs(self, processor, batch_size=-1, seq_length=-1, framework=None):
        """Produce dummy text + image inputs via the processor's tokenizer
        and image processor, merged into one feed dict."""
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework)
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework)
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self):
        """Minimum ONNX opset required by this architecture."""
        return 14


# Backward-compatible alias for the obfuscated class name.
_lowercase = OwlViTOnnxConfig
| 128 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    # Convert a pretrained txt2img unCLIP checkpoint into an image-variation
    # pipeline, reusing the txt2img components and adding a CLIP image encoder.
    #
    # Fixes vs. original: `parser`, `txt2img` and `img2img` were never bound
    # (the results were assigned to throwaway names), and the parsed flag
    # `--txt2img_unclip` was read back via a misspelled attribute.
    parser = argparse.ArgumentParser()

    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")

    parser.add_argument(
        """--txt2img_unclip""",
        default="""kakaobrain/karlo-v1-alpha""",
        type=str,
        required=False,
        help="""The pretrained txt2img unclip.""",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 128 | 1 |
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, IG(X)) pairs for training the secondary learner.

    Loads tokenized WikiText data, measures GPT-2 perplexity on the objective
    set, and dumps context/information-gain pairs to ``igf_data_file``.

    Fixes vs. original: distinct locals had been collapsed into one name, so
    the final ``del model, train_data, objective_set`` referenced undefined
    names; renamed to match the call site in ``main`` (``generate_n_pairs``).
    """
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""")

    # load pretrained model
    model = load_gpta("""gpt2""").to(device)
    print("""computing perplexity on objective set""")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("""perplexity on objective set:""", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train the IGF secondary learner on collected (context, IG) pairs.

    Fixes vs. original: the signature reused a single obfuscated parameter
    name five times (a SyntaxError), the trailing ``del`` referenced undefined
    names, and the ``eval_freq`` parameter was ignored (a literal 100 was
    passed instead); renamed to match the call site in ``main``.

    Returns:
        The trained ``SecondaryLearner`` instance.
    """
    set_seed(42)

    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("""gpt2""")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=eval_freq,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpta,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune GPT-2, optionally filtering batches with the secondary learner.

    Contexts whose predicted information gain falls below ``threshold`` are
    skipped (after 10 optimizer steps the threshold is relaxed to -1, i.e.
    everything passes). Perplexity on ``test_dataset`` is logged every
    ``eval_interval`` steps; training stops after 60 optimizer steps.

    Fixes vs. original: the signature reused one obfuscated parameter name
    eleven times (a SyntaxError) and distinct locals were collapsed into a
    single name; renamed to match the call site in ``main``.

    Returns:
        The fine-tuned model (also saved to ``finetuned_model_name``).
    """
    device = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    test_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(test_perp)
    print("""Test perplexity, step""", global_step, """:""", test_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            # NOTE(review): original code overwrote a local here; writing into
            # the preallocated (1, context_len) buffer is the evident intent.
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    test_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(test_perp)
                    print("""Test perplexity, step""", global_step, """:""", test_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    """Run the three IGF stages: collect IG pairs, train the secondary learner, fine-tune GPT-2.

    Fixes vs. original: the parser was assigned to a throwaway name while
    ``parser.add_argument`` was called on an undefined ``parser``; several
    defaults/types referenced an undefined ``lowerCAmelCase``; and the
    ``__main__`` guard called ``main()`` which did not exist. Defaults below
    that the original obscured are restored as ``None``/plain types —
    presumably correct, but verify against the project's README.
    """
    parser = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""")

    # Required parameters
    parser.add_argument(
        """--data_dir""",
        default=None,
        type=str,
        required=False,
        help="""The input data dir. Should contain data files for WikiText.""",
    )
    parser.add_argument(
        """--model_name_or_path""",
        default=None,
        type=str,
        required=False,
        help="""Path to pretrained model or model identifier from huggingface.co/models""",
    )
    parser.add_argument(
        """--data_file""",
        type=str,
        default=None,
        help=(
            """A jbl file containing tokenized data which can be split as objective dataset, """
            """train_dataset and test_dataset."""
        ),
    )
    parser.add_argument(
        """--igf_data_file""",
        type=str,
        default=None,
        help="""A jbl file containing the context and information gain pairs to train secondary learner.""",
    )
    parser.add_argument(
        """--output_dir""",
        default=None,
        type=str,
        required=False,
        help="""The output directory where the final fine-tuned model is stored.""",
    )
    parser.add_argument(
        """--tokenizer_name""",
        default=None,
        type=str,
        help="""Pretrained tokenizer name or path if not the same as model_name""",
    )
    parser.add_argument("""--seed""", type=int, default=None, help="""A seed for reproducible training.""")
    parser.add_argument(
        """--context_len""",
        default=32,
        type=int,
        help=(
            """The maximum total input sequence length after tokenization. Sequences longer """
            """than this will be truncated, sequences shorter will be padded."""
        ),
    )
    parser.add_argument(
        """--size_objective_set""",
        default=100,
        type=int,
        help="""number of articles that are long enough to be used as our objective set""",
    )
    parser.add_argument(
        """--eval_freq""", default=100, type=int, help="""secondary model evaluation is triggered at eval_freq"""
    )
    parser.add_argument("""--max_steps""", default=1000, type=int, help="""To calculate training epochs""")
    parser.add_argument(
        """--secondary_learner_batch_size""",
        default=128,
        type=int,
        help="""batch size of training data for secondary learner""",
    )
    parser.add_argument(
        """--batch_size""", default=16, type=int, help="""batch size of training data of language model(gpt2) """
    )
    parser.add_argument(
        """--eval_interval""",
        default=10,
        type=int,
        help=(
            """decay the selectivity of our secondary learner filter from"""
            """1 standard deviation above average to 1 below average after 10 batches"""
        ),
    )
    parser.add_argument(
        """--number""", default=100, type=int, help="""The number of examples split to be used as objective_set/test_data"""
    )
    parser.add_argument(
        """--min_len""", default=1026, type=int, help="""The minimum length of the article to be used as objective set"""
    )
    parser.add_argument(
        """--secondary_learner_max_epochs""", default=15, type=int, help="""number of epochs to train secondary learner"""
    )
    parser.add_argument("""--trim""", default=True, type=bool, help="""truncate the example if it exceeds context length""")
    parser.add_argument(
        """--threshold""",
        default=1.0,
        type=float,
        help=(
            """The threshold value used by secondary learner to filter the train_data and allow only"""
            """ informative data as input to the model"""
        ),
    )
    parser.add_argument(
        """--finetuned_model_name""", default="""gpt2_finetuned.pt""", type=str, help="""finetuned_model_name"""
    )
    parser.add_argument(
        """--recopy_model""",
        default=recopy_gpta,
        type=str,
        help="""Reset the model to the original pretrained GPT-2 weights after each iteration""",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="""data/tokenized_stories_train_wikitext103.jbl""",
        igf_data_file="""igf_context_pairs.jbl""",
    )

    # Load train data for secondary learner
    igf_model_data = joblib.load("""data/IGF_values.jbl""")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        igf_model_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="""igf_model.pt""",
    )

    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("""gpt2""")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="""data/tokenized_stories_train_wikitext103.jbl""", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpta,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="""gpt2_finetuned.pt""",
    )


if __name__ == "__main__":
    main()
| 356 |
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    """Builds small DeiT configs and dummy inputs for the unit tests below.

    Fixes vs. original: every method shared one obfuscated name (so later
    definitions shadowed earlier ones) and ``prepare_config_and_inputs_for_common``
    collapsed its 3-tuple into a single name, then referenced an undefined
    ``pixel_values``. Names are restored from the call sites in the test class
    (``prepare_config_and_inputs``, ``create_and_check_model``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,  # accepted for signature compatibility; not stored by the original
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny DeiT."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a small ``DeiTConfig`` from this tester's hyperparameters."""
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last hidden state shape."""
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Check reconstruction shapes, including the greyscale (1-channel) case."""
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check classification logits shapes, including the greyscale case."""
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model tests for the DeiT family.

    Fixes vs. original:
    - the base-class list repeated one placeholder twice, which raises
      ``TypeError: duplicate base class`` at import time; the two mixins
      imported at the top of the file are the clear intent;
    - the class attributes and test methods all shared one obfuscated name,
      so only the last of each survived and unittest discovered nothing;
    - several calls unpacked a method object (``model(**_lowerCamelCase)``)
      instead of the prepared inputs dict.
    """

    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DeiTModel,
            'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    # NOTE(review): the original had three identical ``False`` assignments to
    # one name; these mixin flags are the conventional targets — confirm.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""DeiT does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
            {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
            {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}"""):
                    config.problem_type = problem_type["""title"""]
                    config.num_labels = problem_type["""num_labels"""]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["""labels"""] = inputs["""labels"""].unsqueeze(1).repeat(1, problem_type["""num_labels"""])

                    inputs["""labels"""] = inputs["""labels"""].to(problem_type["""dtype"""])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                F"""Something is going wrong in the regression problem: intercepted {w.message}"""
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO sample image used by the integration tests below.

    Fixes vs. original: the function was defined under an obfuscated name but
    called as ``prepare_img()`` by the integration tests, and was annotated
    ``-> int`` although it returns a PIL image.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
    """Integration tests running real DeiT checkpoints on a sample image.

    Fixes vs. original: the cached property was defined under an obfuscated
    name but read as ``self.default_image_processor``; the forward passes
    unpacked a method object instead of the prepared inputs; both test methods
    shared one name (shadowing each other and hiding them from unittest);
    ``torch.floataa`` is not a dtype — fp16 is intended per the comment.
    """

    @cached_property
    def default_image_processor(self):
        """Image processor matching the checkpoint, or None without vision deps."""
        return (
            DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0_266, 0.1_912, -1.2_861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            """facebook/deit-base-distilled-patch16-224""", torch_dtype=torch.float16, device_map="""auto"""
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 166 | 0 |
import argparse
import os
import re
UpperCamelCase = '''src/transformers'''
# Pattern that looks at the indentation in a line.
UpperCamelCase = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase = re.compile(R'''\[([^\]]+)\]''')
def lowercase_ ( _lowerCamelCase : int):
lowercase__ : str = _re_indent.search(_lowerCamelCase)
return "" if search is None else search.groups()[0]
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple="" , _lowerCamelCase : Any=None , _lowerCamelCase : Tuple=None):
lowercase__ : Optional[Any] = 0
lowercase__ : Optional[int] = code.split("\n")
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase):
index += 1
lowercase__ : str = ["\n".join(lines[:index])]
else:
lowercase__ : List[Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowercase__ : Any = [lines[index]]
index += 1
while index < len(_lowerCamelCase) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase)):
if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
if len(_lowerCamelCase) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
current_block.append(lines[index])
blocks.append("\n".join(_lowerCamelCase))
if index < len(_lowerCamelCase) - 1:
lowercase__ : List[str] = [lines[index + 1]]
index += 1
else:
lowercase__ : List[str] = []
else:
blocks.append("\n".join(_lowerCamelCase))
lowercase__ : List[str] = [lines[index]]
else:
current_block.append(lines[index])
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase) > 0:
blocks.append("\n".join(_lowerCamelCase))
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase):
blocks.append("\n".join(lines[index:]))
return blocks
def lowercase_ ( _lowerCamelCase : Optional[int]):
def _inner(_lowerCamelCase : str):
return key(_lowerCamelCase).lower().replace("_" , "")
return _inner
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Any=None):
# If no key is provided, we use a noop.
def noop(_lowerCamelCase : str):
return x
if key is None:
lowercase__ : Any = noop
# Constants are all uppercase, they go first.
lowercase__ : Tuple = [obj for obj in objects if key(_lowerCamelCase).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowercase__ : str = [obj for obj in objects if key(_lowerCamelCase)[0].isupper() and not key(_lowerCamelCase).isupper()]
# Functions begin with a lowercase, they go last.
lowercase__ : Any = [obj for obj in objects if not key(_lowerCamelCase)[0].isupper()]
lowercase__ : Dict = ignore_underscore(_lowerCamelCase)
return sorted(_lowerCamelCase , key=_lowerCamelCase) + sorted(_lowerCamelCase , key=_lowerCamelCase) + sorted(_lowerCamelCase , key=_lowerCamelCase)
def lowercase_ ( _lowerCamelCase : str):
# This inner function sort imports between [ ].
def _replace(_lowerCamelCase : List[Any]):
lowercase__ : Optional[Any] = match.groups()[0]
if "," not in imports:
return f'''[{imports}]'''
lowercase__ : Optional[int] = [part.strip().replace("\"" , "") for part in imports.split(",")]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1]) == 0:
lowercase__ : Optional[int] = keys[:-1]
return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(_lowerCamelCase)]) + "]"
lowercase__ : List[Any] = import_statement.split("\n")
if len(_lowerCamelCase) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowercase__ : Dict = 2 if lines[1].strip() == "[" else 1
lowercase__ : Optional[Any] = [(i, _re_strip_line.search(_lowerCamelCase).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
lowercase__ : Any = sort_objects(_lowerCamelCase , key=lambda _lowerCamelCase: x[1])
lowercase__ : List[str] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
elif len(_lowerCamelCase) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1]) is not None:
lowercase__ : Any = _re_bracket_content.sub(_replace , lines[1])
else:
lowercase__ : List[Any] = [part.strip().replace("\"" , "") for part in lines[1].split(",")]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1]) == 0:
lowercase__ : Optional[Any] = keys[:-1]
lowercase__ : Optional[Any] = get_indent(lines[1]) + ", ".join([f'''"{k}"''' for k in sort_objects(_lowerCamelCase)])
return "\n".join(_lowerCamelCase)
else:
# Finally we have to deal with imports fitting on one line
lowercase__ : Any = _re_bracket_content.sub(_replace , _lowerCamelCase)
return import_statement
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` blocks of one ``__init__.py``.

    Returns ``True`` when the file would change and ``check_only`` is set;
    otherwise rewrites the file in place when a change is needed.
    """
    with open(file, encoding="utf-8") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every ``__init__.py`` under the source tree.

    Raises ValueError listing the offending files when ``check_only`` is set
    and at least one init would be rewritten.
    """
    failures = []
    # NOTE(review): the tree root was lost in obfuscation; "src/transformers"
    # matches the repo convention for this script — confirm against the repo.
    for root, _, files in os.walk("src/transformers"):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                # Accumulate every failing init instead of keeping only the last one.
                failures.append(os.path.join(root, "__init__.py"))
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    # CLI entry point: `--check_only` reports instead of rewriting.
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_imports_in_all_inits(check_only=args.check_only)
| 87 |
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """Return the Hubble parameter H(z) for a flat-or-curved FLRW universe.

    Curvature density is inferred from the requirement that all relative
    densities sum to one.

    :param hubble_constant: present-day Hubble constant H0 (e.g. km/s/Mpc)
    :param radiation_density: relative radiation density (0..1)
    :param matter_density: relative matter density (0..1)
    :param dark_energy: relative dark-energy density (0..1)
    :param redshift: redshift z >= 0
    :raises ValueError: on negative inputs or relative densities above one

    >>> round(hubble_parameter(68.3, 1e-4, 0.3, 0.7, 0), 5)
    68.3
    """
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")

    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        # Curvature closes the density budget to exactly one.
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3

    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 294 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# Module-level constants for the fast RoBERTa tokenizer. Each binding gets its
# real name back — the class below references `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
        "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
        ),
    },
    "tokenizer_file": {
        "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
        "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
        "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
        "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
        "roberta-base-openai-detector": (
            "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
        ),
        "roberta-large-openai-detector": (
            "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input lengths (in tokens) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) RoBERTa tokenizer.

    Restores the real identifiers: the obfuscated version had every method
    named identically (clobbering each other) and duplicate `__init__`
    parameter names, which is a SyntaxError.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the pre-tokenizer if the stored `add_prefix_space` disagrees
        # with what the caller asked for.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """Mask token string, or None (with a logged error) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token eats the preceding space so `<mask>` behaves like a word.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save vocab/merges to `save_directory`; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """<s> A </s> for one sequence, <s> A </s></s> B </s> for a pair."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_a is None:
            return output

        return output + [self.eos_token_id] + token_ids_a_a + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None):
        """RoBERTa does not use token type ids: always return zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]
| 369 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    """Tests for `IPNDMScheduler`.

    De-obfuscated: the damaged version gave every method the same name
    (clobbering all but the last) and used duplicate parameter names, which
    is a SyntaxError. Attribute names `scheduler_classes` /
    `forward_default_kwargs` are the contract `SchedulerCommonTest` expects.
    """

    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config, overridable through kwargs."""
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload the scheduler and check `step()` output is unchanged."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            # IPNDM is multi-step, so a second call exercises the history path.
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by check_over_configs / check_over_forward above.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Same save/reload round-trip, varying the forward kwargs."""
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run a full two-pass denoising loop and return the final sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_a = scheduler.timesteps[5]
            time_step_b = scheduler.timesteps[6]

            output_a = scheduler.step(residual, time_step_a, sample, **kwargs).prev_sample
            output_b = scheduler.step(residual, time_step_b, sample, **kwargs).prev_sample

            self.assertEqual(output_a.shape, sample.shape)
            self.assertEqual(output_a.shape, output_b.shape)

            output_a = scheduler.step(residual, time_step_a, sample, **kwargs).prev_sample
            output_b = scheduler.step(residual, time_step_b, sample, **kwargs).prev_sample

            self.assertEqual(output_a.shape, sample.shape)
            self.assertEqual(output_a.shape, output_b.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 254_0529) < 10
| 347 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
# Module-level setup: `logger` and `device` are referenced throughout the
# script below (the obfuscated dump bound both to the same name).
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)  # embedding is inference-only
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split ``text`` into chunks of ``n`` pieces, cutting at ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split documents into passages of at most 100 words.

    ``documents`` maps "title" and "text" to parallel lists; documents with
    ``None`` text are dropped, and a ``None`` title becomes the empty string.
    """
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute DPR embeddings for a batch of (title, text) passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    """Build a DPR-embedded knowledge dataset and its FAISS HNSW index."""

    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    # Field names restored: `HfArgumentParser` derives CLI flags from them,
    # and the obfuscated dump gave every field the same (invalid) name.
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    # Controls for the dataset.map() preprocessing steps.
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    # FAISS HNSW index hyper-parameters.
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        # Fall back to a throwaway directory when no output dir is given.
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 46 |
'''simple docstring'''
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime (trial division up to sqrt)."""
    # input validation (was `isinstance(x, x)` in the damaged version)
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
def sieve_er(n: int) -> list:
    """Return all primes from 2 up to ``n`` via the sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.

    # actual sieve of erathostenes: zero out every multiple
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def get_prime_numbers(n: int) -> list:
    """Return all primes from 2 up to ``n`` by testing each candidate."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def prime_factorization(number: int) -> list:
    """Return the prime factorization of ``number`` as a list of factors."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # integer division instead of `/=`: keeps the quotient an int
                # and avoids float-precision issues for large numbers
                quotient //= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def is_even(number: int) -> bool:
    """Return True if ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"

    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Return True if ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"

    return number % 2 != 0
def goldbach(number: int) -> list:
    """Return two primes whose sum is ``number`` (Goldbach's conjecture)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor of two non-negative integers
    (Euclidean algorithm)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple (German: kgV) of two positives."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                # shared factor: take it with the larger multiplicity
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def get_prime(n: int) -> int:
    """Return the n-th prime number, counting from get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return all primes strictly between two primes ``p_number_1 < p_number_2``."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """Return all positive divisors of ``n`` (including 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans
def is_perfect_number(number: int) -> bool:
    """Return True if ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return ``(numerator, denominator)`` reduced to lowest terms."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase (n):
    """Return n! for n >= 0 (0! == 1)."""
    # NOTE(review): parameter renamed back to `n`, which the body uses.
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def lowerCAmelCase (n):
    """Return the n-th term of this Fibonacci variant (1, 1, 2, 3, 5, 8, ...
    with fib(1) == 1, fib(2) == 2)."""
    # NOTE(review): locals `tmp`/`fib_prev`/`ans` were all collapsed into one
    # name by the obfuscation; restored per the loop body's references.
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib_prev = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_prev
        fib_prev = tmp
    return ans
| 211 | 0 |
def UpperCAmelCase_ ( _A ):
    """Return the ``_A``-th ugly number (only prime factors 2, 3 and 5).

    The 1-indexed sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ...;
    ``_A`` values below 1 behave like 1.
    """
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, _A):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        # advance every pointer whose candidate was used so duplicates
        # (e.g. 6 = 2*3 = 3*2) are emitted only once
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    # NOTE(review): the function above is named `UpperCAmelCase_` in this
    # module; the original print referenced the undefined name `ugly_numbers`.
    print(F"{UpperCAmelCase_(200) = }")
| 218 |
import warnings

from .generation import TFGenerationMixin


class UpperCAmelCase__ ( TFGenerationMixin ):
    """Deprecated import shim re-exporting `TFGenerationMixin` from its old
    module path; emits a FutureWarning at class-definition (import) time."""

    # NOTE(review): the obfuscated original used the undefined name `A__` as
    # both base class and warning category; restored to the imported mixin and
    # FutureWarning per the deprecation message below.
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , FutureWarning , )
| 218 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : int = logging.get_logger(__name__)
# Map of canonical RoBERTa checkpoint names to their hosted config files.
# NOTE(review): this dict re-binds the same name `__A` used for the logger
# above, clobbering it -- presumably these were distinct names originally.
__A : int = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
    """Configuration holding the hyper-parameters of a RoBERTa model.

    Defaults reproduce the `roberta-base` architecture; every argument is
    stored as a same-named attribute on the instance.
    """

    lowercase = 'roberta'

    def __init__(
        self,
        vocab_size=5_02_65,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # NOTE(review): the obfuscated original declared every parameter with
        # the same name (a SyntaxError) and bound each value to a throwaway
        # local instead of `self`; names restored from the standard RoBERTa
        # configuration, preserving the original default values and order.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __snake_case ( _SCREAMING_SNAKE_CASE):
    """ONNX-export configuration: declares the model's dynamic input axes."""

    @property
    def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
        """Return the ONNX input spec; axis 1 is `choice` only for the
        multiple-choice task, otherwise the second axis is the sequence."""
        # NOTE(review): the local `dynamic_axis` was obfuscated away; restored
        # per its use in the OrderedDict below.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 120 |
'''simple docstring'''
from math import ceil
def UpperCamelCase_ ( A__ : int = 10_01 ) -> int:
    """Project Euler 28: sum of the numbers on both diagonals of an
    ``A__`` x ``A__`` clockwise number spiral (``A__`` should be odd).

    For ring ``i`` the four corner values sum to ``4*(2i+1)**2 - 6*(2i)``.
    """
    # NOTE(review): locals `total`/`odd`/`even` were collapsed by the
    # obfuscation; restored per the accumulator expression.
    total = 1  # the centre cell
    for i in range(1, int(ceil(A__ / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
    import sys

    # NOTE(review): the solver above is named `UpperCamelCase_`; the original
    # driver referenced the undefined names `solution` and `n`.
    if len(sys.argv) == 1:
        print(UpperCamelCase_())
    else:
        try:
            n = int(sys.argv[1])
            print(UpperCamelCase_(n))
        except ValueError:
            print("Invalid entry - please enter a number")
| 120 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def A ( workflow_run_id , token=None ):
    """Fetch {job name: html_url} for every job of a GitHub Actions run.

    Paginates through the REST API (100 jobs per page). `token` is an
    optional PAT; on any error the traceback is printed and {} returned.
    """
    # NOTE(review): `headers`/`url`/`result` were collapsed into a single
    # obfuscated name, making every request use the URL as headers; restored.
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F"&page={i + 2}", headers=headers).json()
            job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        return job_links
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def A ( worflow_run_id , token=None ):
    """Fetch {artifact name: archive_download_url} for a workflow run.

    Paginates 100 artifacts at a time; returns {} (printing the traceback)
    on any error. (The `worflow_run_id` spelling matches the original API.)
    """
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': F"Bearer {token}"}
    url = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + F"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        return artifacts
    except Exception:
        print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def A ( artifact_name , artifact_url , output_dir , token ):
    """Download one artifact zip into `output_dir` as `<artifact_name>.zip`.

    The artifact endpoint answers with a redirect whose Location header is a
    short-lived, unauthenticated download URL, hence the two-step GET (the
    second request intentionally carries no auth headers).
    """
    # NOTE(review): the obfuscated original declared four identically named
    # parameters (a SyntaxError) and passed one collapsed value for both
    # `allow_redirects` flags; restored per the upstream script.
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': F"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers['Location']
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, F"{artifact_name}.zip")
    with open(file_path, 'wb') as fp:
        fp.write(response.content)
def A ( artifact_zip_path , job_links=None ):
    """Extract [error_line, error, failed_test, job_link] rows from one CI
    artifact zip.

    Reads `failures_line.txt` (one "<location>: <error>" per line),
    `summary_short.txt` ("FAILED <test id>" lines) and `job_name.txt`.
    Raises ValueError when the error and failed-test counts disagree.
    """
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode('UTF-8').strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(': ')]
                                    error = line[line.index(': ') + len(': ') :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith('FAILED '):
                                # the remainder is the test id that failed
                                failed_test = line[len('FAILED ') :]
                                failed_tests.append(failed_test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            F"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            F"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            ' problem.' )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test, job link)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def A ( artifact_dir , job_links=None ):
    """Aggregate error rows from every `*.zip` artifact in `artifact_dir`
    via `get_errors_from_single_artifact`."""
    # NOTE(review): duplicate parameter names fixed; helper name kept as the
    # body references it even though this module's defs were renamed.
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith('.zip')]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def A ( logs , error_filter=None ):
    """Group error rows by error message, most frequent first.

    Each value is {"count": n, "failed_tests": [(test, error_line), ...]};
    errors listed in `error_filter` are dropped.
    """
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
    # NOTE(review): the obfuscated lambda renamed its parameter but the body
    # still used `item`; restored so the sort actually runs.
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def A ( test ):
    """Return the model folder for a test id like
    `tests/models/<model>/test_x.py::Class::method`, or None for non-model
    tests."""
    # NOTE(review): the original computed into a throwaway local and returned
    # the untouched parameter; re-binding `test` restores the intent.
    test = test.split('::')[0]
    if test.startswith('tests/models/'):
        test = test.split('/')[2]
    else:
        test = None
    return test
def A ( logs , error_filter=None ):
    """Group error rows by model (via `get_model`), sorted by total errors.

    Rows whose test does not belong to a model are discarded. Each value is
    {"count": total, "errors": {error: count, ...}} with filtered errors
    removed; models with zero remaining errors are omitted.
    """
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {'count': n_errors, 'errors': error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def A ( reduced_by_error ):
    """Render the per-error summary (from `reduce_by_error`) as a GitHub
    markdown table; error text is truncated to 100 characters."""
    header = '| no. | error | status |'
    sep = '|-:|:-|:-|'
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]['count']
        line = F"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def A ( reduced_by_model ):
    """Render the per-model summary (from `reduce_by_model`) as a GitHub
    markdown table, showing each model's most common ("major") error."""
    header = '| model | no. of errors | major error | count |'
    sep = '|-:|-:|-:|-:|'
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]['count']
        # the errors dict is insertion-ordered most-common-first
        error, _count = list(reduced_by_model[model]['errors'].items())[0]
        line = F"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    # NOTE(review): this driver binds every result to the single obfuscated
    # name `UpperCAmelCase__` yet reads the original names (`parser`, `args`,
    # `job_links`, `artifacts`, `errors`, `counter`, `most_common`,
    # `reduced_by_error`, `reduced_by_model`, `sa`) and calls helpers
    # (`get_job_links`, `get_artifacts_links`, `download_artifact`,
    # `get_all_errors`, `reduce_by_error`, `reduce_by_model`,
    # `make_github_table*`) that this module defines under other names.
    # Left byte-identical pending a decision on the intended identifiers.
    UpperCAmelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    UpperCAmelCase__ = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    UpperCAmelCase__ = get_job_links(args.workflow_run_id, token=args.token)
    UpperCAmelCase__ = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                UpperCAmelCase__ = k.find(" / ")
                UpperCAmelCase__ = k[index + len(" / ") :]
            UpperCAmelCase__ = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)
    UpperCAmelCase__ = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)
    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)
    UpperCAmelCase__ = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    UpperCAmelCase__ = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    UpperCAmelCase__ = counter.most_common(30)
    for item in most_common:
        print(item)
    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)
    UpperCAmelCase__ = reduce_by_error(errors)
    UpperCAmelCase__ = reduce_by_model(errors)
    UpperCAmelCase__ = make_github_table(reduced_by_error)
    UpperCAmelCase__ = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(sa)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(sa)
| 290 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import table: submodule name -> public symbols, extended conditionally
# per installed backend and consumed by `_LazyModule` below.
# NOTE(review): the obfuscation collapsed the dict and every conditional
# extension into re-assignments of one name, and the final `_LazyModule` call
# referenced the then-undefined `_import_structure`; restored to the standard
# transformers lazy-init pattern.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static analysis only; at runtime the module is lazy.
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 290 | 1 |
import requests
# Base endpoint; the caller's API key is appended as a query parameter.
# NOTE(review): the obfuscation bound this to `snake_case__` while the body
# read `_NEWS_API`; restored, along with the parameter and entry-point names.
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def _a ( bbc_news_api_key ) -> None:
    """Print the current top BBC News headlines, numbered from 1."""
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(F"""{i}.) {article['title']}""")


if __name__ == "__main__":
    _a(bbc_news_api_key='<Your BBC News API key goes here>')
| 117 |
from __future__ import annotations
def _a ( lowerCamelCase: list[int | str] ) -> None:
    """Print every permutation of *lowerCamelCase* via backtracking."""
    usage_flags = [0] * len(lowerCamelCase)
    create_state_space_tree(lowerCamelCase, [], 0, usage_flags)
def _a ( lowerCamelCase: list[int | str] , lowerCamelCase: list[int | str] , lowerCamelCase: int , lowerCamelCase: list[int] , ) -> None:
'''simple docstring'''
if index == len(lowerCamelCase ):
print(lowerCamelCase )
return
for i in range(len(lowerCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
__A = True
create_state_space_tree(lowerCamelCase , lowerCamelCase , index + 1 , lowerCamelCase )
current_sequence.pop()
__A = False
# NOTE(review): these driver lines call `generate_all_permutations` and read
# `sequence`/`sequence_a`, but the function above was renamed `_a` and both
# lists are bound to `snake_case__` by the obfuscation, so running this module
# raises NameError. Left byte-identical pending a rename decision.
snake_case__ : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
snake_case__ : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 117 | 1 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
# Make torch ops deterministic so the pipeline tests below are reproducible.
enable_full_determinism()
@skip_mps
class _lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
    """Fast (dummy-sized) tests for `VideoToVideoSDPipeline`.

    NOTE(review): the obfuscation collapsed every class attribute, method
    name and local into one identifier each; names restored from the
    upstream diffusers video-to-video test module.
    """

    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ] )

    def get_dummy_components(self):
        """Build tiny randomly initialised components so tests run quickly."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs (tiny 3-frame video) for the pipeline."""
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith('mps'):
            # mps has no per-device Generator; fall back to the global seed
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs

    def test_text_to_video_default_case(self):
        """End-to-end dummy run; checks the output frame shape and a slice."""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.')
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class _lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test running the real zeroscope checkpoint on CUDA."""

    def test_two_step_model(self):
        """Run 3 inference steps on random video noise and compare a pixel slice."""
        # NOTE(review): locals (`pipe`, `generator`, `video`, `prompt`,
        # `video_frames`, `expected_array`) were collapsed by the obfuscation;
        # restored per their uses below.
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL', torch_dtype=torch.floataa)
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device='cpu').manual_seed(0)
        video = torch.randn((1, 10, 3, 10_24, 5_76), generator=generator)
        video = video.to('cuda')
        prompt = 'Spiderman is surfing'
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type='pt').frames
        expected_array = np.array([-1.0_45_89_84, -1.1_27_92_97, -0.9_66_30_86, -0.91_50_39_06, -0.75_09_76_56])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
| 260 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def lowerCamelCase_ ( function: str , starting_point: complex , variable: str = "x" , precision: float = 10**-10 , multiplicity: int = 1 , ) -> complex:
    """Find a root of `function` (a sympy-parseable expression in `variable`)
    via (modified) Newton-Raphson starting from `starting_point`.

    `multiplicity` > 1 restores quadratic convergence on repeated roots.
    Raises ZeroDivisionError when the derivative vanishes at a guess.
    """
    # NOTE(review): the obfuscated original declared five identically named
    # parameters (a SyntaxError); names restored from the demo calls below.
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError('Could not find root') from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson("log(y) - 1", 2, variable="y")}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
| 260 | 1 |
import os
from pathlib import Path
def lowerCamelCase__ ( ):
    """Compile and import the DeformableDETR `MultiScaleDeformableAttention`
    C++/CUDA extension located under `kernels/deformable_detr`.

    Returns the freshly built extension module.
    """
    from torch.utils.cpp_extension import load

    # NOTE(review): the path anchor and the boolean/flag arguments were
    # collapsed into one obfuscated name; restored per the upstream
    # transformers implementation (anchor is this module's own `__file__`,
    # three levels below the repo root).
    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]
    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
import itertools
import string
from collections.abc import Generator, Iterable
def lowerCAmelCase__ ( seq , size ):
    """Yield successive tuples of at most `size` items from `seq`.

    The final tuple may be shorter when `len(seq)` is not a multiple of
    `size`.
    """
    # NOTE(review): duplicate parameter names (a SyntaxError) fixed, and the
    # islice now consumes the shared iterator instead of re-slicing the input.
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def lowerCAmelCase__ ( dirty ):
    """Normalise plaintext for Playfair: keep uppercase letters only, insert
    an 'X' between doubled letters, and pad with 'X' to an even length."""
    # NOTE(review): the parameter was renamed away from `dirty`, which the
    # body still used; restored.
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def lowerCAmelCase__ ( key ):
    """Build the 5x5 Playfair table (as a flat 25-char list) for `key`.

    'J' is omitted (classic I/J merge); key letters come first, then the
    remaining alphabet, with duplicates skipped.
    """
    alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def lowerCAmelCase__ ( plaintext , key ):
    """Encrypt `plaintext` with the Playfair cipher under `key`."""
    # NOTE(review): duplicate parameter names and collapsed row/column locals
    # restored per the standard digram rules.
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            # same row: take the letter to the right of each (wrapping)
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # same column: take the letter below each (wrapping)
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap the columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def lowerCAmelCase__ ( ciphertext , key ):
    """Decrypt Playfair `ciphertext` under `key` (inverse of the encoder:
    shifts go left/up instead of right/down; rectangles are self-inverse)."""
    table = generate_table(key)
    plaintext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            # same row: take the letter to the left of each (wrapping)
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            # same column: take the letter above each (wrapping)
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle: swap the columns
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
| 150 | 0 |
def lowerCAmelCase_ ( collection ):
    """Sort `collection` ascending by repeatedly extracting its min and max.

    The input list is consumed (elements are removed from it); a new sorted
    list is returned. An empty input yields an empty list.
    """
    # NOTE(review): the min/max locals were collapsed by the obfuscation;
    # restored so the right elements are appended and removed.
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end
if __name__ == "__main__":
    # Simple CLI: read comma-separated integers and print them sorted.
    # NOTE(review): both inputs are bound to the single obfuscated name
    # `snake_case` while the code reads `user_input`/`unsorted` and calls the
    # undefined `merge_sort` (the sorter above is `lowerCAmelCase_`), so this
    # raises NameError as-is. Left byte-identical pending a rename decision.
    snake_case : List[Any] = input("Enter numbers separated by a comma:\n").strip()
    snake_case : List[Any] = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 41 |
from typing import Any
class _snake_case :
def __init__( self , _a ):
__magic_name__ : Union[str, Any] = data
__magic_name__ : str = None
class _snake_case :
    """Minimal singly linked list supporting push, print and a data swap.

    NOTE(review): the obfuscation named all three methods identically and
    assigned `self.head`/node fields to throwaway locals; the method names
    (`print_list`, `push`, `swap_nodes`) are restored from the driver code
    that exercises this class.
    """

    def __init__(self):
        self.head = None

    def print_list(self):
        """Print node payloads front-to-back on a single line."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data):
        """Insert `new_data` at the head of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_a, node_data_b):
        """Swap the payloads of the nodes holding the two values.

        No-op when the values are equal or either value is absent.
        """
        if node_data_a == node_data_b:
            return
        node_a = self.head
        while node_a is not None and node_a.data != node_data_a:
            node_a = node_a.next
        node_b = self.head
        while node_b is not None and node_b.data != node_data_b:
            node_b = node_b.next
        if node_a is None or node_b is None:
            return
        node_a.data, node_b.data = node_b.data, node_a.data
if __name__ == "__main__":
    # Build the list 1 -> 2 -> 3 -> 4 -> 5 by pushing in reverse order.
    # The obfuscated original instantiated an undefined ``LinkedList`` and
    # assigned it to a name other than ``ll``; the linked-list class in this
    # module is ``_snake_case``.
    ll = _snake_case()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    # Exchange the payloads of the nodes holding 1 and 4.
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
| 41 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class UpperCAmelCase_(TaskTemplate):
    """Task template describing the column layout of a text-classification
    dataset (a ``text`` input column and a ``labels`` ClassLabel column).

    Obfuscation artifacts fixed here: the decorator/base referenced the
    undefined name ``_A`` (restored to ``frozen=True`` / ``TaskTemplate``
    from the import above), every class attribute shared one name, the
    ``isinstance`` check compared against a parameter instead of
    ``ClassLabel``, and the method returned an undefined ``task_template``.
    """

    # Field / ClassVar layout expected by the TaskTemplate machinery.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the
        ``ClassLabel`` feature found in ``features``.

        Raises:
            ValueError: if ``label_column`` is missing or not a ``ClassLabel``.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so mutate __dict__ directly.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's column names to the canonical task column names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 88 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
snake_case_ : Any = logging.get_logger(__name__)
snake_case_ : Optional[int] = TypeVar("DatasetType", Dataset, IterableDataset)
def A(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (all map-style or all iterable) into one.

    Args:
        datasets: non-empty list of `Dataset` or `IterableDataset` objects
            (all of the same kind).
        probabilities: optional sampling probabilities, one per dataset.
        seed: optional RNG seed used with ``probabilities``.
        info / split: metadata forwarded to the resulting dataset.
        stopping_strategy: ``"first_exhausted"`` or ``"all_exhausted"``.

    Raises:
        ValueError: on an empty list, mixed/invalid dataset types, dataset
            dictionaries, or an unknown stopping strategy.

    NOTE: the obfuscated original gave all six parameters the same name
    (a SyntaxError); distinct names are restored here.
    """
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            # Lock in the dataset kind from the first element; all others must match.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
def A(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets (all map-style or all iterable) into one.

    Args:
        dsets: non-empty list of `Dataset` or `IterableDataset` objects
            (all of the same kind).
        info / split: metadata forwarded to the resulting dataset.
        axis: 0 to stack rows, 1 to stack columns.

    Raises:
        ValueError: on an empty list, mixed/invalid dataset types, or
            dataset dictionaries.

    NOTE: the obfuscated original gave all four parameters the same name
    (a SyntaxError); distinct names are restored here. The error-message
    wording is kept byte-identical to the original.
    """
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            # Lock in the dataset kind from the first element; all others must match.
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 51 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class __SCREAMING_SNAKE_CASE(TaskTemplate):
    """Task template describing the column layout of an audio-classification
    dataset (an ``audio`` input column and a ``labels`` ClassLabel column).

    Obfuscation artifacts fixed here: the decorator/base referenced an
    undefined name (restored to ``frozen=True`` / ``TaskTemplate`` from the
    import above), the class attributes shared one obfuscated name, the
    ``isinstance`` check referenced the undefined ``UpperCamelCase__``
    (restored to ``ClassLabel``), and the method returned an undefined
    ``task_template``.
    """

    # Field / ClassVar layout expected by the TaskTemplate machinery.
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the
        ``ClassLabel`` feature found in ``features``.

        Raises:
            ValueError: if ``label_column`` is missing or not a ``ClassLabel``.
        """
        if self.label_column not in features:
            raise ValueError(F"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(F"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so mutate __dict__ directly.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's column names to the canonical task column names."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class __SCREAMING_SNAKE_CASE(PipelineTool):
    """Zero-shot text-classification tool backed by an NLI model
    (bart-large-mnli): each candidate label is turned into a hypothesis and
    the label with the highest entailment logit wins.

    Obfuscation artifacts fixed here: the base class referenced the undefined
    ``lowerCamelCase`` (restored to the imported ``PipelineTool``), all class
    attributes and all three methods shared single obfuscated names (so only
    the last of each survived), ``config.idalabel`` should be
    ``config.id2label``, and ``setup`` referenced an undefined parameter.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Load the model and locate the entailment label index in its config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """Build NLI premise/hypothesis pairs for every candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        """Pick the label whose entailment logit (index 2) is highest."""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCamelCase : Dict = 16
__lowerCamelCase : Optional[int] = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build the GLUE/MRPC train and validation dataloaders.

    Args:
        accelerator: the `Accelerator` driving this run (used for the
            tokenization barrier and to pick padding strategy).
        batch_size: per-device batch size for both dataloaders.

    Returns:
        (train_dataloader, eval_dataloader)

    NOTE: the obfuscated original gave both parameters the same name (a
    SyntaxError) and used an obfuscated def name; the name expected by the
    call site in the training loop (``get_dataloaders``) is restored.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only: when the env flag is set, swap in mocked dataloaders so
# the example runs without downloading data.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Fine-tune BERT on GLUE/MRPC, retrying with smaller batch sizes on OOM.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace with ``cpu`` and ``mixed_precision``.

    NOTE: the obfuscated original gave both parameters the same name (a
    SyntaxError) and an obfuscated def name; the name used by the ``main``
    launcher below (``training_function``) is restored.
    """
    # Shrink the run when the mocked dataloaders are used for testing.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    """Parse the command line and launch training.

    NOTE: restored to the name ``main`` expected by the ``__main__`` guard
    below; the help/description strings are kept byte-identical.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
# Script entry point.
# NOTE(review): relies on a module-level ``main`` — the argparse launcher
# above was emitted under an obfuscated name; confirm the definition matches.
if __name__ == "__main__":
    main()
| 52 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = None
__UpperCAmelCase = None
@property
def _lowerCAmelCase ( self : List[Any] ):
return self.feat_extract_tester.prepare_feat_extract_dict()
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case ,'feature_size' ) )
self.assertTrue(hasattr(snake_case ,'sampling_rate' ) )
self.assertTrue(hasattr(snake_case ,'padding_value' ) )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(snake_case ) == len(snake_case ) for x, y in zip(snake_case ,processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='np' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='pt' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case )
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} ,tensor_type='tf' )
SCREAMING_SNAKE_CASE =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _lowerCAmelCase ( self : List[Any] ,snake_case : Optional[Any]=False ):
def _inputs_have_equal_length(snake_case : Dict ):
SCREAMING_SNAKE_CASE =len(input[0] )
for input_slice in input[1:]:
if len(snake_case ) != length:
return False
return True
def _inputs_are_equal(snake_case : str ,snake_case : Dict ):
if len(snake_case ) != len(snake_case ):
return False
for input_slice_a, input_slice_a in zip(snake_case ,snake_case ):
if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.seq_length_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.max_seq_length + pad_diff
SCREAMING_SNAKE_CASE =self.feat_extract_tester.min_seq_length
SCREAMING_SNAKE_CASE =self.feat_extract_tester.batch_size
SCREAMING_SNAKE_CASE =self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,pad_to_multiple_of=10 )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(all(len(snake_case ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
SCREAMING_SNAKE_CASE =pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(snake_case ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
SCREAMING_SNAKE_CASE =(np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int]=False ):
def _inputs_have_equal_length(snake_case : str ):
SCREAMING_SNAKE_CASE =len(input[0] )
for input_slice in input[1:]:
if len(snake_case ) != length:
return False
return True
def _inputs_are_equal(snake_case : Tuple ,snake_case : Optional[Any] ):
if len(snake_case ) != len(snake_case ):
return False
for input_slice_a, input_slice_a in zip(snake_case ,snake_case ):
if not np.allclose(np.asarray(snake_case ) ,np.asarray(snake_case ) ,atol=1e-3 ):
return False
return True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case )
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
# truncate to smallest
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to smallest with np
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
# truncate to middle
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case ,return_tensors='np' ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=snake_case )
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
SCREAMING_SNAKE_CASE =input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertTrue(_inputs_are_equal(snake_case ,snake_case ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(snake_case ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='longest' ,truncation=snake_case )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(snake_case ):
feat_extract.pad(snake_case ,padding='max_length' ,truncation=snake_case )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
SCREAMING_SNAKE_CASE =12
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,truncation=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=snake_case ,)
SCREAMING_SNAKE_CASE =input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
SCREAMING_SNAKE_CASE =len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
SCREAMING_SNAKE_CASE =((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(snake_case ) )
self.assertFalse(_inputs_have_equal_length(snake_case ) )
def _lowerCAmelCase ( self : Optional[int] ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : Tuple ):
self._check_padding(numpify=snake_case )
def _lowerCAmelCase ( self : List[str] ):
self._check_truncation(numpify=snake_case )
def _lowerCAmelCase ( self : int ):
self._check_truncation(numpify=snake_case )
@require_torch
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )[input_name]
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =feat_extract.pad(snake_case ,padding='longest' ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,snake_case )
def _lowerCAmelCase ( self : Dict ):
# Like the previous test, but pads/truncates to max_length = the shortest
# input: all attention_mask rows must then sum to exactly max_length.
SCREAMING_SNAKE_CASE =self.feat_extract_dict
SCREAMING_SNAKE_CASE =True
SCREAMING_SNAKE_CASE =self.feature_extraction_class(**snake_case )
SCREAMING_SNAKE_CASE =self.feat_extract_tester.prepare_inputs_for_common()
SCREAMING_SNAKE_CASE =[len(snake_case ) for x in speech_inputs]
SCREAMING_SNAKE_CASE =feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE =BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE =min(snake_case )
SCREAMING_SNAKE_CASE =feat_extract.pad(
snake_case ,padding='max_length' ,max_length=snake_case ,truncation=snake_case ,return_tensors='np' )
self.assertIn('attention_mask' ,snake_case )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
| 334 | 0 |
# Function to print upper half of diamond (pyramid)
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
for i in range(0 , lowerCAmelCase__ ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(''' ''' , end='''''' )
for _ in range(0 , i + 1 ): # printing stars
print('''* ''' , end='''''' )
print()
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
for i in range(lowerCAmelCase__ , 0 , -1 ):
for _ in range(lowerCAmelCase__ , 0 , -1 ): # printing stars
print('''* ''' , end='''''' )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(''' ''' , end='''''' )
def UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
if n <= 0:
print(''' ... .... nothing printing :(''' )
return
floyd(lowerCAmelCase__ ) # upper half
reverse_floyd(lowerCAmelCase__ ) # lower half
if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    # Fix: the loop flag was assigned to an obfuscated throwaway name while the
    # loop tested an undefined `K`, and `user_number` was never bound.
    K = 1  # 1 = keep prompting, 0 = quit
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
| 97 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def UpperCamelCase ( lowerCAmelCase__ ):
    """Return the inverse of a 2x2 or 3x3 matrix as nested lists of floats.

    Fix: every local in the original was assigned to one obfuscated name, so
    `d`, `determinant`, `swapped_matrix`, `cofactor_matrix`, `adjoint_matrix`
    and `inverse_matrix` were all undefined, and the final division used the
    input matrix instead of the determinant.

    Raises ValueError for a singular matrix or an unsupported size.
    """
    matrix = lowerCAmelCase__
    d = Decimal  # shorthand; Decimal keeps the cofactor arithmetic exact

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation also works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)
        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('Please provide a matrix of size 2x2 or 3x3.')
| 97 | 1 |
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def _A ( snake_case , snake_case , snake_case ) -> Optional[Any]:
# Converts a TensorFlow "token dropping" BERT checkpoint to a PyTorch
# BertForMaskedLM and saves it. Positional args: (tf_checkpoint_path,
# config_path, pytorch_dump_path) — presumably; confirm against the argparse
# block below.
# NOTE(review): obfuscation routed every converted tensor into a throwaway
# local instead of the model parameter it should populate (upstream assigns to
# e.g. self_attn.query.weight), so as written the model is saved unconverted.
# Code left byte-identical; indentation is stripped in this dump.
def get_masked_lm_array(snake_case ):
# Loads a masked-LM head variable from the TF checkpoint; kernels are
# transposed to match PyTorch's (out, in) weight layout.
_lowercase : Any = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
_lowercase : Optional[Any] = tf.train.load_variable(snake_case , snake_case )
if "kernel" in name:
_lowercase : List[Any] = array.transpose()
return torch.from_numpy(snake_case )
def get_encoder_array(snake_case ):
# Loads an encoder-level (embeddings/pooler) variable from the checkpoint.
_lowercase : str = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
_lowercase : Optional[Any] = tf.train.load_variable(snake_case , snake_case )
if "kernel" in name:
_lowercase : List[Any] = array.transpose()
return torch.from_numpy(snake_case )
def get_encoder_layer_array(snake_case , snake_case ):
# Loads a per-transformer-layer variable (layer_index, name).
_lowercase : Optional[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
_lowercase : Optional[int] = tf.train.load_variable(snake_case , snake_case )
if "kernel" in name:
_lowercase : Tuple = array.transpose()
return torch.from_numpy(snake_case )
def get_encoder_attention_layer_array(snake_case , snake_case , snake_case ):
# Loads an attention-layer variable and reshapes it to the target
# PyTorch parameter shape (TF stores per-head factored shapes).
_lowercase : Any = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
_lowercase : int = tf.train.load_variable(snake_case , snake_case )
_lowercase : List[str] = array.reshape(snake_case )
if "kernel" in name:
_lowercase : Dict = array.transpose()
return torch.from_numpy(snake_case )
print(F'''Loading model based on config from {config_path}...''' )
_lowercase : Union[str, Any] = BertConfig.from_json_file(snake_case )
_lowercase : Dict = BertForMaskedLM(snake_case )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
_lowercase : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
_lowercase : BertSelfAttention = layer.attention.self
_lowercase : Optional[int] = get_encoder_attention_layer_array(
snake_case , "_query_dense/kernel" , self_attn.query.weight.data.shape )
_lowercase : Tuple = get_encoder_attention_layer_array(
snake_case , "_query_dense/bias" , self_attn.query.bias.data.shape )
_lowercase : int = get_encoder_attention_layer_array(
snake_case , "_key_dense/kernel" , self_attn.key.weight.data.shape )
_lowercase : List[Any] = get_encoder_attention_layer_array(
snake_case , "_key_dense/bias" , self_attn.key.bias.data.shape )
_lowercase : Tuple = get_encoder_attention_layer_array(
snake_case , "_value_dense/kernel" , self_attn.value.weight.data.shape )
_lowercase : int = get_encoder_attention_layer_array(
snake_case , "_value_dense/bias" , self_attn.value.bias.data.shape )
# Self-attention Output
_lowercase : BertSelfOutput = layer.attention.output
_lowercase : Tuple = get_encoder_attention_layer_array(
snake_case , "_output_dense/kernel" , self_output.dense.weight.data.shape )
_lowercase : Tuple = get_encoder_attention_layer_array(
snake_case , "_output_dense/bias" , self_output.dense.bias.data.shape )
_lowercase : List[str] = get_encoder_layer_array(snake_case , "_attention_layer_norm/gamma" )
_lowercase : int = get_encoder_layer_array(snake_case , "_attention_layer_norm/beta" )
# Intermediate
_lowercase : BertIntermediate = layer.intermediate
_lowercase : Any = get_encoder_layer_array(snake_case , "_intermediate_dense/kernel" )
_lowercase : Tuple = get_encoder_layer_array(snake_case , "_intermediate_dense/bias" )
# Output
_lowercase : BertOutput = layer.output
_lowercase : Union[str, Any] = get_encoder_layer_array(snake_case , "_output_dense/kernel" )
_lowercase : Optional[int] = get_encoder_layer_array(snake_case , "_output_dense/bias" )
_lowercase : int = get_encoder_layer_array(snake_case , "_output_layer_norm/gamma" )
_lowercase : Dict = get_encoder_layer_array(snake_case , "_output_layer_norm/beta" )
# Embeddings
_lowercase : int = get_encoder_array("_position_embedding_layer/embeddings" )
_lowercase : str = get_encoder_array("_type_embedding_layer/embeddings" )
_lowercase : Dict = get_encoder_array("_embedding_norm_layer/gamma" )
_lowercase : List[str] = get_encoder_array("_embedding_norm_layer/beta" )
# LM Head
_lowercase : List[str] = model.cls.predictions.transform
_lowercase : Union[str, Any] = get_masked_lm_array("dense/kernel" )
_lowercase : Optional[Any] = get_masked_lm_array("dense/bias" )
_lowercase : int = get_masked_lm_array("layer_norm/gamma" )
_lowercase : Any = get_masked_lm_array("layer_norm/beta" )
_lowercase : List[str] = get_masked_lm_array("embedding_table" )
# Pooling
_lowercase : List[str] = BertPooler(config=snake_case )
_lowercase : BertPooler = get_encoder_array("_pooler_layer/kernel" )
_lowercase : BertPooler = get_encoder_array("_pooler_layer/bias" )
# Export final model
model.save_pretrained(snake_case )
# Integration test - should load without any errors ;)
_lowercase : Union[str, Any] = BertForMaskedLM.from_pretrained(snake_case )
print(new_model.eval() )
print("Model conversion was done sucessfully!" )
if __name__ == "__main__":
# CLI entry point for the checkpoint conversion above.
# NOTE(review): obfuscation broke the name flow — the parser/parsed args are
# assigned to `_snake_case` but read as `parser`/`args`, and
# `convert_checkpoint_to_pytorch` is defined above as `_A`. Left unchanged.
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_snake_case = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 250 |
'''simple docstring'''
from __future__ import annotations
def _A ( snake_case ) -> float:
_lowercase : Optional[Any] = 0.00
_lowercase : Dict = 0
for resistor in resistors:
if resistor <= 0:
_lowercase : Union[str, Any] = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(snake_case )
first_sum += 1 / float(snake_case )
index += 1
return 1 / first_sum
def _A ( snake_case ) -> float:
_lowercase : Dict = 0.00
_lowercase : List[str] = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
_lowercase : Dict = F'''Resistor at index {index} has a negative value!'''
raise ValueError(snake_case )
index += 1
return sum_r
if __name__ == "__main__":
    # Exercise the embedded doctests when this module is run as a script.
    import doctest

    doctest.testmod()
| 250 | 1 |
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def lowerCAmelCase__ ( lowerCamelCase : Optional[Any] ):
# Builds the (encoder, decoder) HF configs for a Donut model: a DonutSwin
# encoder sized from the original model's config and an MBart causal decoder
# sized to the original tokenizer's vocab.
# NOTE(review): `model`/`original_config` are never bound here (obfuscated
# assignment targets) — presumably the parameter is the original DonutModel.
_A : Optional[Any] = model.config
_A : Dict = DonutSwinConfig(
image_size=original_config.input_size ,patch_size=4 ,depths=original_config.encoder_layer ,num_heads=[4, 8, 16, 32] ,window_size=original_config.window_size ,embed_dim=128 ,)
_A : Any = MBartConfig(
is_decoder=lowerCamelCase ,is_encoder_decoder=lowerCamelCase ,add_cross_attention=lowerCamelCase ,decoder_layers=original_config.decoder_layer ,max_position_embeddings=original_config.max_position_embeddings ,vocab_size=len(
model.decoder.tokenizer ) ,scale_embedding=lowerCamelCase ,add_final_layer_norm=lowerCamelCase ,)
return encoder_config, decoder_config
def lowerCAmelCase__ ( lowerCamelCase : List[str] ):
if "encoder.model" in name:
_A : Dict = name.replace('encoder.model' ,'encoder' )
if "decoder.model" in name:
_A : Dict = name.replace('decoder.model' ,'decoder' )
if "patch_embed.proj" in name:
_A : int = name.replace('patch_embed.proj' ,'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_A : Tuple = name.replace('patch_embed.norm' ,'embeddings.norm' )
if name.startswith('encoder' ):
if "layers" in name:
_A : Union[str, Any] = 'encoder.' + name
if "attn.proj" in name:
_A : Optional[int] = name.replace('attn.proj' ,'attention.output.dense' )
if "attn" in name and "mask" not in name:
_A : Optional[Any] = name.replace('attn' ,'attention.self' )
if "norm1" in name:
_A : Tuple = name.replace('norm1' ,'layernorm_before' )
if "norm2" in name:
_A : Any = name.replace('norm2' ,'layernorm_after' )
if "mlp.fc1" in name:
_A : Optional[int] = name.replace('mlp.fc1' ,'intermediate.dense' )
if "mlp.fc2" in name:
_A : Union[str, Any] = name.replace('mlp.fc2' ,'output.dense' )
if name == "encoder.norm.weight":
_A : Optional[int] = 'encoder.layernorm.weight'
if name == "encoder.norm.bias":
_A : Dict = 'encoder.layernorm.bias'
return name
def lowerCAmelCase__ ( lowerCamelCase : Union[str, Any] ,lowerCamelCase : Any ):
# Rewrites an original Donut state dict into the HF layout: fused qkv
# weights are split into query/key/value slices; attn_mask buffers and the
# encoder's final LayerNorm are dropped; everything else is renamed.
# NOTE(review): obfuscation broke this body — `orig_state_dict`, `val`,
# `key_split`, `layer_num`, `block_num`, `dim` and `model` are never bound
# (all assignments target `_A`), and the renamed entries are never written
# back into the dict. Left byte-identical; rewrite requires the sibling
# rename_key helper whose name is also collided in this dump.
for key in orig_state_dict.copy().keys():
_A : Tuple = orig_state_dict.pop(lowerCamelCase )
if "qkv" in key:
_A : str = key.split('.' )
_A : Any = int(key_split[3] )
_A : Dict = int(key_split[5] )
_A : Union[str, Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
_A : List[str] = val[:dim, :]
_A : List[Any] = val[dim : dim * 2, :]
_A : Optional[Any] = val[-dim:, :]
else:
_A : str = val[:dim]
_A : Optional[int] = val[dim : dim * 2]
_A : Optional[int] = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
_A : Dict = val
return orig_state_dict
def lowerCAmelCase__ ( lowerCamelCase : int ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Any=False ):
# Converts an original Donut checkpoint to a HF VisionEncoderDecoderModel,
# verifies embeddings / encoder / decoder outputs against the original on a
# sample scanned document, then optionally saves and pushes to the Hub.
# Args (presumably): model_name, pytorch_dump_folder_path=None,
# push_to_hub=False — confirm against the argparse block below.
# NOTE(review): obfuscation broke the name flow throughout (results bound to
# `_A` but read as `original_model`, `model`, `processor`, ...; helper names
# `get_configs`/`convert_state_dict` are undefined). Left byte-identical.
# load original model
_A : Optional[Any] = DonutModel.from_pretrained(lowerCamelCase ).eval()
# load HuggingFace model
_A , _A : Tuple = get_configs(lowerCamelCase )
_A : List[Any] = DonutSwinModel(lowerCamelCase )
_A : Any = MBartForCausalLM(lowerCamelCase )
_A : Tuple = VisionEncoderDecoderModel(encoder=lowerCamelCase ,decoder=lowerCamelCase )
model.eval()
_A : str = original_model.state_dict()
_A : Any = convert_state_dict(lowerCamelCase ,lowerCamelCase )
model.load_state_dict(lowerCamelCase )
# verify results on scanned document
_A : Tuple = load_dataset('hf-internal-testing/example-documents' )
_A : str = dataset['test'][0]['image'].convert('RGB' )
_A : Tuple = XLMRobertaTokenizerFast.from_pretrained(lowerCamelCase ,from_slow=lowerCamelCase )
_A : Union[str, Any] = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis ,size=original_model.config.input_size[::-1] )
_A : Union[str, Any] = DonutProcessor(lowerCamelCase ,lowerCamelCase )
_A : Union[str, Any] = processor(lowerCamelCase ,return_tensors='pt' ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
_A : Union[str, Any] = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
_A : List[str] = 'When is the coffee break?'
_A : str = task_prompt.replace('{user_input}' ,lowerCamelCase )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
_A : Dict = '<s_rvlcdip>'
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
_A : List[str] = '<s_cord>'
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
# NOTE(review): 's_cord-v2>' is missing its leading '<' — looks like an
# upstream typo carried over; confirm before relying on this prompt.
_A : Tuple = 's_cord-v2>'
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
_A : str = '<s_zhtrainticket>'
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
_A : Tuple = 'hello world'
else:
raise ValueError('Model name not supported' )
_A : str = original_model.decoder.tokenizer(lowerCamelCase ,add_special_tokens=lowerCamelCase ,return_tensors='pt' )[
'input_ids'
]
_A : List[Any] = original_model.encoder.model.patch_embed(lowerCamelCase )
_A , _A : Dict = model.encoder.embeddings(lowerCamelCase )
assert torch.allclose(lowerCamelCase ,lowerCamelCase ,atol=1E-3 )
# verify encoder hidden states
_A : Optional[int] = original_model.encoder(lowerCamelCase )
_A : Tuple = model.encoder(lowerCamelCase ).last_hidden_state
assert torch.allclose(lowerCamelCase ,lowerCamelCase ,atol=1E-2 )
# verify decoder hidden states
_A : int = original_model(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ).logits
_A : List[Any] = model(lowerCamelCase ,decoder_input_ids=lowerCamelCase ).logits
assert torch.allclose(lowerCamelCase ,lowerCamelCase ,atol=1E-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase )
processor.save_pretrained(lowerCamelCase )
if push_to_hub:
model.push_to_hub('nielsr/' + model_name.split('/' )[-1] ,commit_message='Update model' )
processor.push_to_hub('nielsr/' + model_name.split('/' )[-1] ,commit_message='Update model' )
if __name__ == "__main__":
# CLI entry point for the Donut conversion above.
# NOTE(review): `parser`/`args`/`convert_donut_checkpoint` don't match the
# obfuscated assignment targets (`A`) and definition names; left unchanged.
A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
A : Dict = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 227 |
'''simple docstring'''
def lowerCAmelCase__ ( n: int, array: list[int], target: int ):
    """Count ordered sequences drawn from `array` that sum to `target`.

    Naive exponential recursion (Combination Sum IV). Fix: the original
    declared all three parameters with the same name (a SyntaxError) and the
    body read `array`/`target` which were therefore never bound.

    :param n: length of `array` (unused here; kept for signature parity with
        the other implementations in this file)
    :param array: candidate numbers, each usable any number of times
    :param target: the sum to reach
    :return: number of ordered combinations
    """
    def count_of_possible_combinations(remaining: int) -> int:
        if remaining < 0:
            return 0
        if remaining == 0:
            return 1
        return sum(count_of_possible_combinations(remaining - item) for item in array)

    return count_of_possible_combinations(target)
def lowerCAmelCase__ ( n: int, array: list[int], target: int ):
    """Count ordered sequences from `array` summing to `target`, memoised.

    Top-down DP over the remaining target. Fix: the original had duplicate
    parameter names (SyntaxError) and the memo write/`answer` return were
    lost to an obfuscated throwaway local.

    :param n: length of `array` (unused; kept for signature parity)
    :param array: candidate numbers, reusable
    :param target: the sum to reach
    :return: number of ordered combinations
    """
    def count_with_dp_array(remaining: int, dp_array: list[int]) -> int:
        if remaining < 0:
            return 0
        if remaining == 0:
            return 1
        if dp_array[remaining] != -1:  # already computed
            return dp_array[remaining]
        answer = sum(
            count_with_dp_array(remaining - item, dp_array) for item in array
        )
        dp_array[remaining] = answer  # memoise before returning
        return answer

    dp_array = [-1] * (target + 1)
    return count_with_dp_array(target, dp_array)
def lowerCAmelCase__ ( n: int, array: list[int], target: int ):
    """Count ordered sequences from `array` summing to `target`, bottom-up.

    Iterative DP table of size target+1. Fix: the original had duplicate
    parameter names (SyntaxError) and never bound `dp_array` (the table and
    its base case were assigned to an obfuscated throwaway local).

    :param n: length of `array` (must equal len(array))
    :param array: candidate numbers, reusable
    :param target: the sum to reach
    :return: number of ordered combinations
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to make 0: the empty sequence
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the original bound all three values to one obfuscated name and then
    # called an undefined `combination_sum_iv`; call the DP implementation
    # defined (last) above instead.
    n = 3
    array = [1, 2, 5]
    target = 5
    print(lowerCAmelCase__(n, array, target))
| 227 | 1 |
"""simple docstring"""
# Lazy __init__ for the MVP model family: builds the import structure, gates
# tokenizer/model entries on optional deps, and defers real imports to
# _LazyModule unless type-checking.
# NOTE(review): obfuscation collapsed the distinct structure names
# (_import_structure, the per-branch lists) into one `__snake_case`, so the
# later `_import_structure` reference is unbound. Tokens left byte-identical.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__snake_case = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
# fast tokenizer unavailable — silently omit it from the public API
pass
else:
__snake_case = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
# torch unavailable — omit the modeling classes
pass
else:
__snake_case = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
# Direct imports for static type checkers only.
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
# Replace this module with a lazy proxy at runtime.
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 203 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# Slow-tokenizer import is optional: RemBertTokenizer needs sentencepiece.
# NOTE(review): all module constants below were collapsed to one obfuscated
# name `__snake_case` (upstream: RemBertTokenizer/None, logger,
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, SPIECE_UNDERLINE) — the class below
# references those original names. Tokens left byte-identical.
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
__snake_case = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
__snake_case = {
"""google/rembert""": 256,
}
__snake_case = """▁"""
# Fast RemBERT tokenizer backed by a sentencepiece vocab / tokenizers JSON.
# NOTE(review): the base class name `snake_case_` is an obfuscation artifact —
# presumably PreTrainedTokenizerFast; confirm against the original file.
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : str = RemBertTokenizer
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="[CLS]" , UpperCamelCase__="[SEP]" , UpperCamelCase__="<unk>" , UpperCamelCase__="[SEP]" , UpperCamelCase__="<pad>" , UpperCamelCase__="[CLS]" , UpperCamelCase__="[MASK]" , **UpperCamelCase__ , ) -> Tuple:
'''Initialise the fast tokenizer; the mask token strips no leading space.'''
# Wrap the mask token so it keeps the space before it (lstrip semantics).
snake_case : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , )
snake_case : int = do_lower_case
snake_case : Union[str, Any] = remove_space
snake_case : Optional[int] = keep_accents
snake_case : Any = vocab_file
snake_case : Any = False if not self.vocab_file else True
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''Build model inputs: [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair.'''
snake_case : Optional[Any] = [self.sep_token_id]
snake_case : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
'''Return a special-tokens mask: 1 at [CLS]/[SEP] positions, 0 on sequence tokens.'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
'''Return token-type ids: 0 over the first segment (incl. specials), 1 over the second.'''
snake_case : Union[str, Any] = [self.sep_token_id]
snake_case : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
'''Copy the sentencepiece model into save_directory; logs an error (and returns None) for a non-directory path.'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error("Vocabulary path ({}) should be a directory".format(UpperCamelCase__ ) )
return
snake_case : str = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
# Only copy when saving to a different location than the source file.
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
return (out_vocab_file,)
| 203 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowercase : str = logging.get_logger(__name__)
lowercase : List[Any] = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
lowercase : int = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
lowercase : Any = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
lowercase : Dict = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
lowercase : Any = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
lowercase : int = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
lowercase : Any = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
lowercase : Dict = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
lowercase : int = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
lowercase : Any = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
lowercase : Optional[Any] = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
lowercase : int = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
lowercase : Any = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
lowercase : Optional[Any] = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
lowercase : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
lowercase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
lowercase : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
lowercase : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
lowercase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
lowercase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
lowercase : List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
lowercase : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
lowercase : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
lowercase : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
lowercase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
lowercase : Optional[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
lowercase : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
lowercase : Dict = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
# One thin auto class per task head. Class names restored from the
# auto_class_update(<Name>, ...) call that follows each class — those calls
# referenced names the obfuscated classes no longer defined. The mapping is
# stored under `_model_mapping` per the _BaseAutoModelClass convention.
class FlaxAutoModel(_BaseAutoModelClass):
    """Generic Flax auto model (bare backbone, no task head)."""

    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeqaSeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeqaSeqLM = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVisionaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVisionaSeq = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeqaSeq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeqaSeq = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 353 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class SCREAMING_SNAKE_CASE__ ( TaskTemplate ):
    """Task template mapping a dataset's columns onto the text-classification schema.

    Fields restored from the internal references (`self.label_column`,
    `self.label_schema`, `self.text_column`); the obfuscated version named all
    five fields identically, making the methods raise AttributeError/NameError.
    """

    # `task` is always serialized, even when it still has the default value.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel.

        Raises:
            ValueError: if `label_column` is missing or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        # Bug fix: the second isinstance argument was the `features` mapping
        # itself; the intent is to require a ClassLabel feature.
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ instead of setattr.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Dataset column name -> canonical task column name."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
A_ = {'''tokenization_byt5''': ['''ByT5Tokenizer''']}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
A_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 64 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class A_ ( unittest.TestCase ):
    """Tests for ChineseCLIPProcessor (BERT tokenizer + ChineseCLIP image processor).

    Every method in the obfuscated version shared one name (so later defs
    shadowed earlier ones) while the bodies called `self.get_tokenizer()` etc.;
    method names are restored from those internal calls and from unittest's
    setUp/tearDown/test_* conventions.
    """

    def setUp(self):
        # Build a throwaway checkpoint directory with a tiny vocab and an
        # image-processor config that from_pretrained can load.
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels moved to last axis)."""
        # Bug fix: dtype was the nonexistent `np.uinta`.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 195 | 0 |
'''simple docstring'''
# Backward-compatibility shim: re-export the pipeline symbols from their new
# location so old `diffusers.pipeline_utils` imports keep working.
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate


# Warn (once, without the "standard" prefix) whenever this legacy module is
# imported; stacklevel=3 points the warning at the user's import site.
deprecate(
    "pipelines_utils",
    "0.22.0",
    "Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
    standard_warn=False,
    stacklevel=3,
)
| 360 |
'''simple docstring'''
def bfs(graph, s, t, parent):
    """Breadth-first search over the residual-capacity matrix `graph`.

    Fills `parent` with the BFS predecessor of each reached vertex so the
    augmenting path can be reconstructed, and returns True iff sink `t`
    is reachable from source `s`.

    Note: the obfuscated original gave all four parameters the same name
    (a SyntaxError); names restored to match the `bfs(...)` call site below.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            # Only follow edges that still have residual capacity.
            if not visited[ind] and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def ford_fulkerson(graph, source, sink):
    """Maximum flow from `source` to `sink` (Edmonds-Karp variant).

    `graph` is a square capacity matrix and is mutated in place into its
    residual capacities. Name restored to match the module-level call.
    """

    def _bfs(parent):
        # Self-contained BFS over the residual graph; True when sink reachable.
        visited = [False] * len(graph)
        queue = [source]
        visited[source] = True
        while queue:
            u = queue.pop(0)
            for ind in range(len(graph[u])):
                if not visited[ind] and graph[u][ind] > 0:
                    queue.append(ind)
                    visited[ind] = True
                    parent[ind] = u
        return visited[sink]

    parent = [-1] * len(graph)
    max_flow = 0
    while _bfs(parent):
        # Bottleneck capacity along the augmenting path found by BFS.
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities: subtract forward, add backward.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
if __name__ == "__main__":
    # Classic CLRS flow network; expected maximum flow is 23. The demo is
    # guarded so importing the module no longer executes (previously it
    # crashed on undefined names left by obfuscation).
    graph = [
        [0, 16, 13, 0, 0, 0],
        [0, 0, 10, 12, 0, 0],
        [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20],
        [0, 0, 0, 7, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ]
    source, sink = 0, 5
    print(ford_fulkerson(graph, source, sink))
| 123 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class _a :
    """Aho-Corasick automaton for simultaneous multi-keyword search.

    Each node is a dict with keys: value (edge character), next_states
    (child indices), fail_state (failure-link index), output (keywords
    ending at or reachable from this node). Method names restored from the
    internal calls (`self.add_keyword`, `self.find_next_state`, ...), which
    the obfuscated single-name methods could not satisfy.
    """

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        # Root node.
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        """Return the child of `current_state` reached by `char`, or None."""
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        """Insert `keyword` into the trie, extending it where needed."""
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        """BFS over the trie computing failure links and merged outputs."""
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            # Depth-1 nodes always fail back to the root.
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                # Walk failure links until a state with a matching edge (or root).
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                # A match here also implies every match of the fail state.
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return {keyword: [start indices]} for all keyword occurrences in `string`."""
        result: dict[str, list[int]] = {}
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 260 |
"""simple docstring"""
def lowercase(string: str, separator: str = " ") -> list:
    """Split `string` on single-character `separator` without using str.split.

    Unlike str.split, a trailing separator does not yield a trailing empty
    string. (Parameter names restored: the obfuscated signature declared the
    same name twice — a SyntaxError — while the body read `string`/`separator`.)

    >>> lowercase("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    >>> lowercase("Hello there")
    ['Hello', 'there']
    >>> lowercase("11/22/63", separator="/")
    ['11', '22', '63']
    """
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            # Last character and not a separator: flush the remaining tail.
            split_words.append(string[last_index : index + 1])
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 260 | 1 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_UpperCamelCase = logging.getLogger(__name__)
class _A ( TokenClassificationTask ):
    """NER task: reads CoNLL-style files (one `token label...` pair per line).

    Base class restored to the imported TokenClassificationTask; method names
    restored per that base-class contract (the obfuscated class defined three
    methods under one name, shadowing each other).
    """

    def __init__(self, label_idx=-1):
        # Column (of each whitespace-split line) that holds the label.
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        """Parse `{data_dir}/{mode}.txt` into InputExamples, one per sentence."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                # Blank lines / DOCSTART markers terminate the current sentence.
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            # Flush a trailing sentence with no terminating blank line.
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        """Interleave predictions with the test input, one `token pred` per line."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                # Token fell past the model's max sequence length: no prediction.
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        """Read labels from `path` (one per line), ensuring "O" is present."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _A ( _A ):
    """Chunking task: same file format as the NER class above, but the label
    lives in the second-to-last column (CoNLL-2000 style). Inherits the
    reading/writing logic from the preceding class."""

    def __init__(self):
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        """Chunk label set, read from `path` when given."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class _A ( TokenClassificationTask ):
    """Part-of-speech tagging task backed by CoNLL-U files (via conllu.parse_incr)."""

    def read_examples_from_file(self, data_dir, mode) -> List[InputExample]:
        """Parse `{data_dir}/{mode}.txt` (CoNLL-U) into InputExamples."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List) -> None:
        """Write `form (gold|pred)` tuples per token, one sentence per line."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        """UPOS tag set, read from `path` when given."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 365 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class _A ( TaskTemplate ):
    """Task template mapping a dataset's columns onto the image-classification schema.

    Mirrors the text-classification template defined earlier in this file;
    field names restored from the internal references (`self.image_column`,
    `self.label_column`, `self.label_schema`).
    """

    # `task` is always serialized, even when it still has the default value.
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel.

        Raises:
            ValueError: if `label_column` is missing or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        # Bug fix: the second isinstance argument was the `features` mapping itself.
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ instead of setattr.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Dataset column name -> canonical task column name."""
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 16 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# Names restored from their use sites in the classes below
# (HEURISTIC, grid, delta, TPosition were all bound to the same identifier).
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# Positions are (y, x) tuples throughout this module.
TPosition = tuple[int, int]
class Node:
    """Search-tree node for (bidirectional) A*. Name restored from the
    `Node(...)` constructor calls in AStar below; the obfuscated __init__
    also bound every attribute to a local instead of `self`."""

    def __init__(
        self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan (HEURISTIC == 1) or euclidean distance to the goal."""
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Ordering by total cost lets open lists be sorted directly.
        return self.f_cost < other.f_cost
class AStar:
    """Plain A* over the module-level `grid`. Name restored from the
    `AStar(...)` uses in BidirectionalAStar and the demo script."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        # Positions are (y, x); Node takes (x, y, ...) hence the swapped indices.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A*; return the path to the target, or [start] when unreachable."""
        while self.open_nodes:
            # Open nodes are sorted using __lt__ (f_cost ordering).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # Keep whichever of the two equal-position nodes is cheaper.
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """In-bounds, non-obstacle neighbours of `parent` (cost +1 each)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Follow parent links from `node` back to the start, then reverse."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Bidirectional A*: two AStar searches advancing toward each other.
    Name restored from the demo script's `BidirectionalAStar(init, goal)` call."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Alternate both searches until their frontiers meet."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Each search aims at the other's current best node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # Keep whichever of the two equal-position nodes is cheaper.
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node)
                        )

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the forward path with the reversed backward path (meeting point once)."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_A : int =(0, 0)
_A : Any =(len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_A : List[Any] =time.time()
_A : Optional[Any] =AStar(init, goal)
_A : List[Any] =a_star.search()
_A : Dict =time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
_A : List[Any] =time.time()
_A : int =BidirectionalAStar(init, goal)
_A : List[str] =time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 41 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
# NOTE(review): flag name lost to obfuscation; it is never read in this chunk —
# presumably a module-level switch from the original test file. TODO confirm.
_A : Union[str, Any] =False


# Intentionally empty test case; kept so the module defines the expected class.
class _lowercase ( unittest.TestCase ):
    pass
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    """Slow integration test for VersatileDiffusionImageVariationPipeline.

    Local names restored (the obfuscated body referenced `pipe`, `generator`,
    `image_slice` etc. without ever binding them); the test method is named
    test_* so unittest discovery can find it.
    """

    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0_441, 0.0_469, 0.0_507, 0.0_575, 0.0_632, 0.0_650, 0.0_865, 0.0_909, 0.0_945])
        # Tolerant comparison: diffusion outputs differ slightly across hardware.
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 41 | 1 |
"""simple docstring"""
def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move `height` disks from `from_pole` to `to_pole`,
    using `with_pole` as scratch space (classic Tower of Hanoi).

    Names restored: the obfuscated signature repeated one parameter name four
    times (a SyntaxError) and the recursive calls target `move_tower`/`move_disk`.
    """
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)
def move_disk(fp, tp):
    """Print a single disk move from pole `fp` to pole `tp`."""
    print("moving disk from", fp, "to", tp)
def main():
    """Read the tower height from stdin and run the demo (name restored to
    match the `main()` call in the __main__ guard)."""
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
| 56 |
"""simple docstring"""
def _lowercase ( __lowerCAmelCase ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [0] * len(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = []
SCREAMING_SNAKE_CASE__ : Any = [1] * len(__lowerCAmelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__lowerCAmelCase ) ):
if indegree[i] == 0:
queue.append(__lowerCAmelCase )
while queue:
SCREAMING_SNAKE_CASE__ : str = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
SCREAMING_SNAKE_CASE__ : str = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__lowerCAmelCase )
print(max(__lowerCAmelCase ) )
# Adjacency list of Graph (name restored: the call below reads `graph`).
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 56 | 1 |
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# App configuration: names restored from their use sites in the loader
# functions below (MODEL_TYPE and LOAD_DENSE_INDEX are read there).
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    """Load the retrieval (retribert) and answer-generation (BART/T5) models.

    Returns (qar_tokenizer, qar_model, sas_tokenizer, sas_model); names
    restored from the tuple unpacking at module level and the literal
    `return` statement that survived obfuscation. Moves models to cuda:0.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    """Load the wiki40b passage collection plus its faiss GPU index, and an ES client.

    Returns (wiki40b_passages, wiki40b_gpu_index_flat, es_client); the first two
    are None when LOAD_DENSE_INDEX is off.
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        # Pre-computed 128-d passage embeddings, memory-mapped from disk.
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        # NOTE(review): GPU ordinal 1 is hard-coded — assumes a second GPU is available.
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    """Load the ELI5 training split and a faiss index over its question embeddings."""
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    # Pre-computed 128-d question embeddings, memory-mapped from disk.
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
# Materialize the cached resources once at import time.  These globals are read
# by make_support / find_nearest_training / answer_question below; the previous
# unpacks all bound the same placeholder name, leaving every consumer undefined.
wiki40b_passages, wiki40b_gpu_index, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    """Return the `n_results` ELI5 training examples whose questions are nearest to `question`.

    Uses the module-level RetriBERT embedder and the faiss index built in
    load_train_data().  The original signature repeated one parameter name
    (a SyntaxError) and indexed the dataset with the question string instead
    of the retrieved row id.
    """
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    scores, hit_ids = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in hit_ids[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    """Retrieve supporting passages for `question` and build the seq2seq input document.

    Returns (question_doc, support_list) where support_list holds
    (article_title, section_title, score, passage_text) tuples.
    NOTE(review): argument order of query_qa_dense_index restored from the
    upstream ELI5 demo — confirm against eli5_utils.
    """
    if source == "none":
        # No retrieval: feed the generator an empty 10-passage context.
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name="english_wiki40b_snippets_100w",
                n_results=n_results,
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate one long-form answer for `question_doc` with the seq2seq model.

    Keyword names match the call site below (min_len/max_len/sampling/n_beams/
    top_p/temp); the original definition repeated a single parameter name,
    which is a SyntaxError.
    NOTE(review): the returned `support_list` is read from the module-level
    global assigned in the main script, not computed here — confirm intended.
    """
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1_024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar: logo banner rendered as raw HTML.
# The two strings below were bound to SCREAMING_SNAKE_CASE but read back as
# header_html / header_full, so the original raised NameError.
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)

# The four display modes; `action` below is the selected index into this list.
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    # Defaults when the options panel is collapsed: show everything.
    action = 3
    show_passages = True
retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    # Defaults when the options panel is collapsed.
    wiki_source = "wiki40b"
    index_type = "dense"
# Generation defaults; the sidebar widgets below may override them.
sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        # Beam size is irrelevant when sampling.
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    # Free-form question entered by the user.
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Interleave dense and sparse hits, de-duplicated, keeping the top 10.
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            # Link each passage back to its Wikipedia article / section anchor.
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        # Show the first answer plus any answer scored above 2.
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    """Builds small SwiftFormer configs/inputs and runs shape checks for the tests below.

    Renamed from a mangled identifier: the test class instantiates it as
    ``SwiftFormerModelTester`` and calls ``prepare_config_and_inputs`` /
    ``create_and_check_*``; the original definition also repeated one
    parameter name in ``__init__``, which is a SyntaxError.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1_000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None when use_labels is off."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small SwiftFormerConfig matching the tester's hyper-parameters."""
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last hidden state shape (7x7 final feature map)."""
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check classification logits both with and without labels."""
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for SwiftFormer.

    The original bases were ``(a, a, unittest.TestCase)`` — an undefined,
    duplicated name; ModelTesterMixin and PipelineTesterMixin are imported at
    the top of the file.  Methods are renamed ``test_*`` so unittest collects
    them.
    NOTE(review): the five boolean flag attribute names follow the common
    mixin's convention — confirm against test_modeling_common.
    """

    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            # Zero out every init-scale hyper-parameter (recursively) so that
            # parameters must be exactly 0 or 1 after init.
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    Renamed to match its call site in the integration test below; the old
    ``-> str`` annotation was wrong (a PIL Image is returned).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the published swiftformer-xs checkpoint.

    The property was defined under a mangled name but read as
    ``self.default_image_processor``; device placement used an undefined name
    where ``torch_device`` (imported above) belongs.
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits against reference values from the original release
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 159 | 1 |
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    """Encrypt `input_string` with a rail-fence (zigzag) cipher of `key` rails.

    Characters are written along a zigzag over `key` rows, then the rows are
    concatenated.  Renamed from ``__a`` (three functions shared that name and
    shadowed each other); the original also repeated one parameter name,
    which is a SyntaxError.

    Raises ValueError when key <= 0; returns the input unchanged when the
    grid would be degenerate (key == 1 or not enough characters).
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string
def decrypt(input_string: str, key: int) -> str:
    """Invert the rail-fence cipher: rebuild the zigzag template, fill it with
    the ciphertext row by row, then read the characters back in zigzag order.

    Renamed from ``__a`` so that bruteforce() below can call it; the original
    repeated one parameter name (SyntaxError) and reused a single mangled
    name for several distinct locals.

    Raises ValueError when key <= 0; a key of 1 is the identity.
    """
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypt `input_string` under every key from 1 to len-1.

    Returns a mapping of key -> candidate plaintext so a human can pick the
    readable one.  Renamed from ``__a``; the call to ``decrypt`` below is
    what grounds the sibling function's name.
    """
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 352 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    """Builds small GPT-NeoX configs/inputs and runs shape checks for the test suite.

    Renamed from ``lowercase`` (a second class of the same name follows and
    would shadow this one).  Every method in the original repeated a single
    mangled parameter name, which is a SyntaxError; parameter names are
    restored from the attribute assignments and call sites in the bodies.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        # Reserve the last vocab id for padding.
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, token_labels); optional parts may be None."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        """Build a small GPTNeoXConfig matching the tester's hyper-parameters."""
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        """Same as prepare_config_and_inputs but with the config flipped to decoder mode."""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        """Check that generating with a past_key_values cache matches a full forward pass."""
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowercase ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase):
"""simple docstring"""
a__ : Union[str, Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
a__ : Any = (GPTNeoXForCausalLM,) if is_torch_available() else ()
a__ : str = (
{
"feature-extraction": GPTNeoXModel,
"question-answering": GPTNeoXForQuestionAnswering,
"text-classification": GPTNeoXForSequenceClassification,
"text-generation": GPTNeoXForCausalLM,
"token-classification": GPTNeoXForTokenClassification,
"zero-shot": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : Optional[int] = False
a__ : Tuple = False
a__ : int = False
a__ : List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
UpperCAmelCase_= GPTNeoXModelTester(self )
UpperCAmelCase_= ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=64 , num_attention_heads=8 )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
# This regression test was failing with PyTorch < 1.3
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase_= None
self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
UpperCAmelCase_= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _SCREAMING_SNAKE_CASE ( self : str , __UpperCAmelCase : Any ) -> Dict:
UpperCAmelCase_, UpperCAmelCase_= self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_= ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase_= ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_= GPTNeoXModel(__UpperCAmelCase )
original_model.to(__UpperCAmelCase )
original_model.eval()
UpperCAmelCase_= original_model(__UpperCAmelCase ).last_hidden_state
UpperCAmelCase_= original_model(__UpperCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_= {"""type""": scaling_type, """factor""": 10.0}
UpperCAmelCase_= GPTNeoXModel(__UpperCAmelCase )
scaled_model.to(__UpperCAmelCase )
scaled_model.eval()
UpperCAmelCase_= scaled_model(__UpperCAmelCase ).last_hidden_state
UpperCAmelCase_= scaled_model(__UpperCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) )
@require_torch
class lowercase ( unittest.TestCase):
"""simple docstring"""
@slow
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
UpperCAmelCase_= AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
UpperCAmelCase_= GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(__UpperCAmelCase )
UpperCAmelCase_= tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(__UpperCAmelCase )
# The hub repo. is updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCAmelCase_= """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
UpperCAmelCase_= model.generate(**__UpperCAmelCase , do_sample=__UpperCAmelCase , max_new_tokens=20 )
UpperCAmelCase_= tokenizer.batch_decode(__UpperCAmelCase )[0]
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
| 277 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

# Maps the `mode` constructor argument of the Lightning module below to an Auto* factory.
# NOTE(review): the obfuscated original rebound a single name (`__snake_case`) five times,
# while later code references `MODEL_MODES` and `arg_to_scheduler` — names restored to match.
MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeqaSeqLM,
    "translation": AutoModelForSeqaSeqLM,
}

# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule, # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
# Convenience forms for argparse `choices`/`metavar` in the CLI builders below.
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class lowercase(pl.LightningModule):
    """Generic PyTorch-Lightning wrapper around a Hugging Face transformer (upstream name: BaseTransformer).

    NOTE(review): reconstructed. The obfuscated original used duplicate parameter names
    (a SyntaxError) and collapsed every ``self.x = ...`` assignment into a dead local,
    so attributes read later (``self.step_count``, ``self.opt``, ``self.model``, ...)
    were never set. Attribute targets restored from their read sites in this class.
    """

    def __init__(
        self,
        hparams,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize config, tokenizer and model from `hparams.model_name_or_path` unless given explicitly.

        :param hparams: argparse.Namespace produced by `add_generic_args` + `add_model_specific_args`.
        :param num_labels: optional label count forwarded to `AutoConfig`.
        :param mode: key into MODEL_MODES selecting the Auto* model class.
        """
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        # Forward optional dropout/layerdrop overrides into the model config, but only
        # if the underlying config actually supports the attribute.
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        """Re-load the wrapped model in place from a Hugging Face checkpoint."""
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        """Build the per-step LR scheduler dict for the optimizer created in configure_optimizers()."""
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        lr_scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return lr_scheduler

    def configure_optimizers(self):
        """Prepare optimizer (AdamW or Adafactor) and the step-wise LR schedule."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        # Test behaves exactly like validation.
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        """Cache `dataset_size` (and the train loader) so total_steps() can be computed."""
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path, batch_size, shuffle=False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        """Path of the cached-features file for `mode` inside `hparams.data_dir`."""
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        """Save model + tokenizer to `<output_dir>/best_tfmr` from the rank-0 process only."""
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Register model/optimization CLI flags on `parser`."""
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=sorted(arg_to_scheduler.keys()),
            metavar="{" + ", ".join(sorted(arg_to_scheduler.keys())) + "}",
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    """Initialize the RAG retriever on the master worker only.

    Class name restored from the ``InitCallback()`` reference in ``generic_train``;
    NOTE(review): the hook name is reconstructed — confirm against the installed
    pytorch-lightning version. The obfuscated original also used duplicate
    parameter names (a SyntaxError).
    """

    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    """Print the name of every RAG parameter that received no gradient.

    Fixes the obfuscated original, which printed the whole ``pl_module`` for each
    gradient-less parameter instead of the parameter's name, and used duplicate
    parameter names (a SyntaxError). NOTE(review): hook name reconstructed.
    """

    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    """Log learning rates per step and dump validation/test metrics.

    Class name restored from the ``LoggingCallback()`` reference in ``generic_train``;
    NOTE(review): hook names reconstructed — confirm against the pytorch-lightning
    version in use.
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the current LR of every parameter group under lr_group_<i>.
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    """Register task-independent CLI flags (output dir, fp16, TPU, seed, ...) on `parser`.

    Fixes the obfuscated original's duplicate parameter names (a SyntaxError).
    NOTE(review): `root_dir` is kept for interface compatibility; defaults are
    anchored at this file's directory as in the upstream script.
    """
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model,
    args,
    early_stopping_callback=None,
    logger=True,
    extra_callbacks=None,
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    """Build a pl.Trainer around `model` and run `trainer.fit` when `args.do_train` is set.

    Returns the trainer. Fixes from the obfuscated original: duplicate parameter
    names (a SyntaxError), a mutable default argument for `extra_callbacks`
    (the list was appended to in place), `args.fpaa` vs. the `--fp16` flag
    registered by `add_generic_args`, and the discarded `train_params`
    assignments. NOTE(review): `train_params` keys reconstructed — confirm
    against the pytorch-lightning Trainer signature in use.
    """
    # Avoid mutating a shared default list when early stopping is appended below.
    extra_callbacks = [] if extra_callbacks is None else list(extra_callbacks)

    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    """Shared assertions for a Dataset read from the 4-row JSON fixture.

    Name restored from its call sites in this file; the obfuscated original used
    duplicate parameter names (a SyntaxError).
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """Reading with/without keep_in_memory yields the same dataset and the expected Arrow memory behavior."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` (or None for inference) are honored by JsonDatasetReader."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    """Column order of the file (col_3, col_1, col_2) is preserved on read.

    NOTE(review): fixture name `jsonl_312_path` reconstructed from the upstream
    datasets test suite — confirm against conftest.py.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    """Requesting features in a different order than the file still yields that requested order.

    NOTE(review): fixture name `jsonl_312_path` reconstructed — confirm against conftest.py.
    """
    # File columns are (col_3, col_1, col_2); request (col_2, col_3, col_1).
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    """`split=` is propagated to the resulting Dataset; None defaults to 'train'."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    # Fixed operator-precedence bug: the original parsed as
    # `(dataset.split == split) if split else "train"`, which always passed when split was falsy.
    assert dataset.split == split if split else dataset.split == "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    """A single path and a list of paths are both accepted."""
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict read from the 4-row JSON fixture.

    Name restored from its call sites in this file; the obfuscated original used
    duplicate parameter names (a SyntaxError).
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """A {'train': path} mapping yields a DatasetDict, with the expected Arrow memory behavior."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    """Explicit `features` (or None for inference) are honored for DatasetDict reads."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    """Each requested split of a {split: path} mapping keeps its split name."""
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    """Parse a whole JSON document from a binary buffer.

    Name restored from the `load_json` reference in the parametrize list below.
    """
    return json.load(buffer)
def load_json_lines(buffer):
    """Parse a JSON-lines binary buffer into a list of objects.

    Fixes the obfuscated original, which called `json.loads` on the whole buffer
    for every line instead of on `line`. Name restored from the
    `load_json_lines` reference in the parametrize list below.
    """
    return [json.loads(line) for line in buffer]
class lowercase :
    """Tests for writing a `datasets` Dataset to JSON/JSON-Lines via ``JsonDatasetWriter``.

    NOTE(review): every method declares several parameters that all share the
    name ``UpperCamelCase_`` — duplicate argument names are a SyntaxError —
    and the bodies read names that are never bound (``exported_content``,
    ``keys``, ``container``, ``len_at``, ``extension`` ...).  The upstream
    fixtures were presumably ``(lines, load_json_function, dataset)`` etc.;
    TODO confirm against the original test-suite before running.
    """

    @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Round-trip: write with/without `lines=` and reload with the matching loader."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ ).write()
            buffer.seek(0 )
            UpperCamelCase__ :List[Any] = load_json_function(UpperCamelCase_ )
        assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
        assert isinstance(exported_content[0] , UpperCamelCase_ )
        # The shared test dataset has exactly 10 rows.
        assert len(UpperCamelCase_ ) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''' , [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789''' ), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ] , )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Check the container type and keys produced by every pandas-style `orient`."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ ).write()
            buffer.seek(0 )
            UpperCamelCase__ :Optional[int] = load_json(UpperCamelCase_ )
        assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            # orients with no mapping level ("values") must not expose `.keys`
            assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(UpperCamelCase_ ) == 10

    @pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Same round-trip as above, but written with two worker processes."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , num_proc=2 ).write()
            buffer.seek(0 )
            UpperCamelCase__ :Union[str, Any] = load_json_function(UpperCamelCase_ )
        assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
        assert isinstance(exported_content[0] , UpperCamelCase_ )
        assert len(UpperCamelCase_ ) == 10

    @pytest.mark.parametrize(
        '''orient, container, keys, len_at''' , [
            ('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
            ('''split''', dict, {'''columns''', '''data'''}, '''data'''),
            ('''index''', dict, set('''0123456789''' ), None),
            ('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
            ('''values''', list, None, None),
            ('''table''', dict, {'''schema''', '''data'''}, '''data'''),
        ] , )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Orient matrix again, with multiprocess writing (num_proc=2)."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , lines=UpperCamelCase_ , orient=UpperCamelCase_ , num_proc=2 ).write()
            buffer.seek(0 )
            UpperCamelCase__ :int = load_json(UpperCamelCase_ )
        assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(UpperCamelCase_ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(UpperCamelCase_ ) == 10

    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        """An invalid process count (num_proc=0) must raise at writer construction."""
        with pytest.raises(UpperCamelCase_ ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , num_proc=0 )

    @pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Compressed output must match the pre-built reference file byte-for-byte."""
        UpperCamelCase__ :Tuple = tmp_path_factory.mktemp('''data''' ) / F'''test.json.{extension}'''
        UpperCamelCase__ :Union[str, Any] = str(shared_datadir / F'''test_file.json.{extension}''' )
        JsonDatasetWriter(UpperCamelCase_ , UpperCamelCase_ , compression=UpperCamelCase_ ).write()
        with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
            UpperCamelCase__ :Dict = f.read()
        with fsspec.open(UpperCamelCase_ , '''rb''' , compression='''infer''' ) as f:
            UpperCamelCase__ :int = f.read()
        assert exported_content == original_content
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCamelCase ( __snake_case ):
    """Tokenization tests for DistilBERT, reusing the BERT tokenizer test-suite.

    Fixes: the three class attributes previously all shared one name (so only
    the last survived) and the method body read names (`tokenizer`, `text`)
    that were never bound; the canonical attribute/local names used by the
    BERT test-suite are restored.
    """

    # Attributes consumed by the inherited BertTokenizationTest machinery.
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def lowerCAmelCase_ ( self ) -> Union[str, Any]:
        """Integration check: [CLS]/[SEP] wiring for single and paired sequences."""
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

# Fix: these module constants were all bound to a single obfuscated name
# (`lowerCamelCase_`), while the tokenizer class below reads the canonical
# names `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`, etc.
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
        ),
    },
    '''tokenizer_file''': {
        '''yjernite/retribert-base-uncased''': (
            '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
        ),
    },
}

# Maximum input length (in tokens) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''yjernite/retribert-base-uncased''': 512,
}

# Default constructor kwargs per pretrained checkpoint.
PRETRAINED_INIT_CONFIGURATION = {
    '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class __lowerCamelCase ( __snake_case ):
    """Fast RetriBERT tokenizer (WordPiece, backed by HuggingFace *tokenizers*).

    Fixes applied to the obfuscated original: duplicate parameter names (a
    SyntaxError), class attributes that all shared one name, three methods
    that all shared one name, and bodies that read names never bound.  The
    canonical BertTokenizerFast-style names expected by the base class are
    restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """Build the fast tokenizer and re-sync the backend normalizer options."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer when the saved tokenizer.json options
        # disagree with what the caller asked for.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self , token_ids_a , token_ids_b=None ):
        """[CLS] A [SEP] (+ B [SEP] when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self , token_ids_a , token_ids_b=None ):
        """Segment ids: 0s for the first sequence (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary(self , save_directory , filename_prefix=None ):
        """Save the WordPiece vocabulary; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def a__ ( flax_key_tuple , flax_tensor ):
    """Rename one Flax parameter key and adapt its tensor to PyTorch layout.

    Fixes the duplicate ``SCREAMING_SNAKE_CASE`` parameter names (a
    SyntaxError) by restoring the names the body actually reads.

    Returns the (possibly renamed) key tuple and the (possibly transposed)
    tensor.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer: (experts, in, out) -> (experts, out, in)
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer.  NOTE: the joined-string condition is always truthy for
        # a non-empty key tuple; kept for parity with the original converter.
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def a__ ( layer , checkpoint_info , switch_checkpoint_path ):
    """Split a flattened checkpoint key into (real layer name, sub-key parts, content).

    Fixes the duplicate ``SCREAMING_SNAKE_CASE`` parameter names (a
    SyntaxError) by restoring the names the body actually reads.
    """
    if "metadata" in layer:
        split_layer = layer.split("metadata" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
    else:
        split_layer = layer.split("/" )
        curr_real_layer_name = "/".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    # kvstore paths are resolved against the checkpoint directory; the driver
    # is always the local filesystem.
    if "kvstore/path" in layer:
        content = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def a__ ( current_block , save_path ):
    """Rename the keys of one state-dict block and serialise it with torch.save.

    Fixes the duplicate ``SCREAMING_SNAKE_CASE`` parameter names (a
    SyntaxError) by restoring the names the body actually reads.
    """
    current_block = rename_keys(current_block )
    new_current_block = {}
    for k, v in current_block.items():
        # NOTE(review): the key expression for this copy was lost in the
        # obfuscated source (values were assigned to an anonymous local);
        # keys are copied through unchanged — TODO confirm against the
        # upstream converter.
        new_current_block[k] = v
    current_block = new_current_block
    torch.save(current_block , save_path )
def a__ ( switch_checkpoint_path , dump_path , max_shard_size , dtype , weights_name = WEIGHTS_NAME ):
    """Stream a T5X Switch checkpoint from tensorstore into sharded PyTorch files.

    Fixes the obfuscated original, whose five parameters all shared one name
    (a SyntaxError) and whose body read anonymous locals.  Variable names are
    reconstructed from the visible structure — conf: review against the
    upstream converter.

    NOTE(review): the helpers referenced below (`get_key_and_tensorstore_dict`,
    `rename_base_flax_keys`, `rename_and_save_block`) are the functions
    defined above, which in this file are all bound to the name ``a__``;
    their canonical names must be restored for this to resolve.

    Returns (metadata, index) for the sharded checkpoint, or
    ({weights_name: keys}, None) when a single shard suffices.
    """
    max_shard_size = convert_file_size_to_int(max_shard_size )
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info , sep="/" )
    # Group the flattened keys back into one dict per real layer.
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer , checkpoint_info , switch_checkpoint_path )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
        raw_weights = torch.tensor(raw_weights )
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
        # use the renaming pattern from the small conversion scripts
        renamed_key, raw_weights = rename_base_flax_keys(tuple(key.split("/" ) ) , raw_weights )
        key = "/".join(renamed_key )
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path , weights_name.replace(".bin" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
            rename_and_save_block(current_block , save_path )
            sharded_state_dicts.append(current_block.keys() )
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch , dtype ) )
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(".bin" , f"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
    rename_and_save_block(current_block , save_path )
    sharded_state_dicts.append(current_block.keys() )
    # If we only have one shard, we return it
    if len(sharded_state_dicts ) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(
            ".bin" , f"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , f"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
if __name__ == "__main__":
    # Fixes: the parser/args were assigned to an anonymous name but read as
    # `parser`/`args`, the attribute `switch_tax_checkpoint_path` did not
    # match the `--switch_t5x_checkpoint_path` flag, and `shard_on_the_fly`
    # does not exist here — the sharding routine defined last above is `a__`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--switch_t5x_checkpoint_path''',
        default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
        type=str,
        required=False,
        help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
    )
    parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
    parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
        type=str,
        required=False,
        help='''Path to the output pytorch model.''',
    )
    args = parser.parse_args()
    a__(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def a__ ( ):
    """Manual sanity check: load the converted switch-base-8 model and generate
    a completion for a sentinel-filled prompt.

    Fixes the obfuscated body, which assigned every result to anonymous
    locals and then read undefined names (`config`, `model`, `tokenizer`, ...).
    """
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
    tokenizer = TaTokenizer.from_pretrained("t5-small" )
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text , return_tensors="pt" ).input_ids
    out = model.generate(input_ids , decoder_start_token_id=0 )
    print(tokenizer.decode(out[0] ) )
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class SCREAMING_SNAKE_CASE__ :
    """Binary-tree node: a coin count plus optional left/right children.

    Fix: the three fields were all declared with the name ``a`` (so only one
    survived); the tree-walking code below accesses ``data``/``left``/``right``.
    The ``TreeNode | None`` annotations stay unevaluated thanks to
    ``from __future__ import annotations`` at the top of the file.
    """

    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
# (moves, excess) result of distributing coins over a subtree.  Fix: the tuple
# was bound to an anonymous name, but get_distrib() below references it as
# `CoinsDistribResult`.
CoinsDistribResult = namedtuple('''CoinsDistribResult''', '''moves excess''')
def a__ ( root: TreeNode | None ) -> int:
    """Minimum number of moves to leave exactly one coin on every tree node.

    A move transfers one coin between adjacent nodes (LeetCode 979 style).
    Fixes the obfuscated original, whose parameters did not match the names
    the bodies read (``root``/``node``).  Raises ``ValueError`` when the
    total number of coins differs from the number of nodes.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node: TreeNode | None ) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(root ) != count_coins(root ):
        raise ValueError("The nodes number should be same as the number of coins" )

    # Main calculation: each call returns (moves, excess) for a subtree, where
    # excess = coins available minus nodes; plain tuples keep the function
    # self-contained (the module-level CoinsDistribResult has the same shape).
    def get_distrib(node: TreeNode | None ):
        if node is None:
            return (0, 1)
        left_moves, left_excess = get_distrib(node.left )
        right_moves, right_excess = get_distrib(node.right )
        coins_to_left = 1 - left_excess
        coins_to_right = 1 - right_excess
        moves = (
            left_moves
            + right_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        excess = node.data - coins_to_left - coins_to_right
        return (moves, excess)

    return get_distrib(root )[0]
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 108 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def A__ ( __lowerCamelCase ):
    """Build a ``BitConfig`` for the given timm model name with ImageNet-1k labels.

    Label maps are fetched from the ``huggingface/label-files`` dataset repo.
    Fixes the obfuscated body, which read an undefined ``model_name`` and
    passed the raw model name where the computed conv-layer type belonged.
    """
    model_name = __lowerCamelCase
    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # BiT checkpoints use weight-standardized convolutions.
    conv_layer = '''std_conv''' if '''bit''' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=10_00 , id2label=id2label , label2id=label2id , )
    return config
def A__ ( __lowerCamelCase ):
    """Map a timm BiT parameter name onto its HF `BitForImageClassification` name.

    Fix: the obfuscated body read an undefined ``name``; the parameter is now
    bound to it.  Applies the stem/blocks/head substitutions, then prefixes
    everything that is not already part of the ``bit`` model or the classifier.
    """
    name = __lowerCamelCase
    if "stem.conv" in name:
        name = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "head.fc" in name:
        name = name.replace('''head.fc''' , '''classifier.1''' )
    if name.startswith('''norm''' ):
        name = '''bit.''' + name
    if "bit" not in name and "classifier" not in name:
        name = '''bit.encoder.''' + name
    return name
def A__ ( ):
    """Download the standard COCO cats image used across conversion scripts.

    Fix: ``stream`` was passed an undefined name; streaming must be enabled
    so ``response.raw`` is readable by PIL.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=False ):
    """Convert a timm BiT checkpoint to HF ``BitForImageClassification``, verify
    its outputs against timm, and optionally save and push the result.

    NOTE(review): the three parameters all share the name ``__lowerCamelCase``
    (a SyntaxError) and the body reads many names that are never bound
    (``model_name``, ``timm_model``, ``state_dict``, ``val``, ``key``,
    ``model``, ``transform``, ``processor``, ``pixel_values``, ``outputs``,
    ``logits``, ``timm_logits``, ``pytorch_dump_folder_path``,
    ``push_to_hub`` ...).  Upstream these were distinct locals — TODO restore
    before running.
    """
    SCREAMING_SNAKE_CASE_ = get_config(__lowerCamelCase )
    # load original model from timm
    SCREAMING_SNAKE_CASE_ = create_model(__lowerCamelCase, pretrained=__lowerCamelCase )
    timm_model.eval()
    # load state_dict of original model
    SCREAMING_SNAKE_CASE_ = timm_model.state_dict()
    for key in state_dict.copy().keys():
        SCREAMING_SNAKE_CASE_ = state_dict.pop(__lowerCamelCase )
        # head weights are stored with extra singleton dims in timm
        SCREAMING_SNAKE_CASE_ = val.squeeze() if '''head''' in key else val
    # load HuggingFace model
    SCREAMING_SNAKE_CASE_ = BitForImageClassification(__lowerCamelCase )
    model.eval()
    model.load_state_dict(__lowerCamelCase )
    # create image processor mirroring timm's resolved preprocessing pipeline
    SCREAMING_SNAKE_CASE_ = create_transform(**resolve_data_config({}, model=__lowerCamelCase ) )
    SCREAMING_SNAKE_CASE_ = transform.transforms
    SCREAMING_SNAKE_CASE_ = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }
    SCREAMING_SNAKE_CASE_ = BitImageProcessor(
        do_resize=__lowerCamelCase, size={'''shortest_edge''': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=__lowerCamelCase, crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]}, do_normalize=__lowerCamelCase, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )
    SCREAMING_SNAKE_CASE_ = prepare_img()
    SCREAMING_SNAKE_CASE_ = transform(__lowerCamelCase ).unsqueeze(0 )
    SCREAMING_SNAKE_CASE_ = processor(__lowerCamelCase, return_tensors='''pt''' ).pixel_values
    # verify pixel values
    assert torch.allclose(__lowerCamelCase, __lowerCamelCase )
    # verify logits
    with torch.no_grad():
        SCREAMING_SNAKE_CASE_ = model(__lowerCamelCase )
    SCREAMING_SNAKE_CASE_ = outputs.logits
    print('''Logits:''', logits[0, :3] )
    print('''Predicted class:''', model.config.idalabel[logits.argmax(-1 ).item()] )
    SCREAMING_SNAKE_CASE_ = timm_model(__lowerCamelCase )
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(__lowerCamelCase, outputs.logits, atol=1E-3 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
        print(F'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(__lowerCamelCase )
        processor.save_pretrained(__lowerCamelCase )
    if push_to_hub:
        print(F'''Pushing model {model_name} and processor to the hub''' )
        model.push_to_hub(F'''ybelkada/{model_name}''' )
        processor.push_to_hub(F'''ybelkada/{model_name}''' )
if __name__ == "__main__":
    # Fixes: the parser/args were assigned to an anonymous name but read as
    # `parser`/`args`, and `convert_bit_checkpoint` does not exist here — the
    # converter defined last above is `A__`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )
    args = parser.parse_args()
    A__(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 352 |
def A__ ( __lowerCamelCase = 10_00 ):
    """Return sum over a = 3..limit of 2 * a * ((a - 1) // 2).

    Fix: the body read an undefined name ``n``; it now uses the parameter
    (default upper limit 1000).
    """
    return sum(2 * a * ((a - 1) // 2) for a in range(3, __lowerCamelCase + 1 ) )
if __name__ == "__main__":
    # Fix: the solver above is named `A__`; `solution` does not exist here.
    print(A__())
| 257 | 0 |
import os
import sys
import unittest
SCREAMING_SNAKE_CASE_:int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE_:Optional[Any] = os.path.join(git_repo_path, """src""", """diffusers""")
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for the diffusers `check_dummies` maintenance script.

    NOTE(review): results are assigned to ``A`` but the assertions read
    ``__UpperCamelCase`` (never bound), and all four test methods share the
    name ``_lowerCAmelCase`` so Python keeps only the last one.  TODO restore
    the distinct names from the upstream test file before relying on this.
    """

    def _lowerCAmelCase ( self ):
        """find_backend() should extract the backend tag from an `if not is_*_available` line."""
        A : Any = find_backend(""" if not is_torch_available():""" )
        self.assertEqual(__UpperCamelCase, """torch""" )
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        A : Tuple = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
        self.assertEqual(__UpperCamelCase, """torch_and_transformers""" )
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        A : List[str] = find_backend(
            """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
        self.assertEqual(__UpperCamelCase, """torch_and_transformers_and_onnx""" )

    def _lowerCAmelCase ( self ):
        """read_init() must expose the per-backend object registry."""
        A : Any = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""", __UpperCamelCase )
        self.assertIn("""torch_and_transformers""", __UpperCamelCase )
        self.assertIn("""flax_and_transformers""", __UpperCamelCase )
        self.assertIn("""torch_and_transformers_and_onnx""", __UpperCamelCase )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("""UNet2DModel""", objects["""torch"""] )
        self.assertIn("""FlaxUNet2DConditionModel""", objects["""flax"""] )
        self.assertIn("""StableDiffusionPipeline""", objects["""torch_and_transformers"""] )
        self.assertIn("""FlaxStableDiffusionPipeline""", objects["""flax_and_transformers"""] )
        self.assertIn("""LMSDiscreteScheduler""", objects["""torch_and_scipy"""] )
        self.assertIn("""OnnxStableDiffusionPipeline""", objects["""torch_and_transformers_and_onnx"""] )

    def _lowerCAmelCase ( self ):
        """create_dummy_object() renders constants, functions and classes."""
        A : List[Any] = create_dummy_object("""CONSTANT""", """\'torch\'""" )
        self.assertEqual(__UpperCamelCase, """\nCONSTANT = None\n""" )
        A : Optional[Any] = create_dummy_object("""function""", """\'torch\'""" )
        self.assertEqual(
            __UpperCamelCase, """\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n""" )
        A : Dict = """
class FakeClass(metaclass=DummyObject):
    _backends = \'torch\'
    def __init__(self, *args, **kwargs):
        requires_backends(self, \'torch\')
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, \'torch\')
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, \'torch\')
    """
        A : Optional[Any] = create_dummy_object("""FakeClass""", """\'torch\'""" )
        self.assertEqual(__UpperCamelCase, __UpperCamelCase )

    def _lowerCAmelCase ( self ):
        """create_dummy_files() must emit a complete per-backend dummy module."""
        A : str = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
    requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
    _backends = [\"torch\"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, [\"torch\"])
    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, [\"torch\"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, [\"torch\"])
"""
        A : Any = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
        self.assertEqual(dummy_files["""torch"""], __UpperCamelCase )
| 116 | """simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s).  Fix: the constant was bound to an anonymous name
# while the functions below reference it as `c`.
c = 299_792_458

# Symbols for a generic symbolic four-vector (ct, x, y, z).
ct , x , y , z = symbols("""ct x y z""")
def lowercase ( velocity: float ) -> float:
    """Return beta = v/c for a speed given in m/s.

    Fixes the obfuscated original: the parameter (`a__`) did not match the
    name the body read (`velocity`), and `c` was undefined; the speed of
    light is defined locally so the function is self-contained.

    Raises ``ValueError`` for speeds above c or below 1 m/s.
    """
    c = 299_792_458  # speed of light (m/s)
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('''Speed must be greater than or equal to 1!''' )
    return velocity / c
def lowercase ( velocity: float ) -> float:
    """Return the Lorentz factor gamma = 1 / sqrt(1 - (v/c)^2) for a speed in m/s.

    Fix: the original called an undefined name ``beta``; the ratio v/c (with
    the same validation) is computed inline so the function is self-contained.
    """
    c = 299_792_458  # speed of light (m/s)
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
    elif velocity < 1:
        raise ValueError('''Speed must be greater than or equal to 1!''' )
    return 1 / sqrt(1 - (velocity / c) ** 2 )
def lowercase ( velocity: float ) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix along x for a speed in m/s.

    Fix: the original called undefined names ``gamma`` and ``beta``; both are
    computed inline (with the same validation) so the function is
    self-contained.
    """
    c = 299_792_458  # speed of light (m/s)
    if velocity > c:
        raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' )
    elif velocity < 1:
        raise ValueError('''Speed must be greater than or equal to 1!''' )
    b = velocity / c
    g = 1 / sqrt(1 - b ** 2 )
    return np.array(
        [
            [g, -g * b, 0, 0],
            [-g * b, g, 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def lowercase ( a__ : float , a__ : np.ndarray | None = None ) -> np.ndarray:
# Ensure event is not empty
if event is None:
_UpperCamelCase = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(a__ ) @ event
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    # NOTE(review): the names read below — `transform`, `four_vector`,
    # `sub_dict`, `numerical_vector`, `ct`, `x`, `y`, `z`, `c` — are all
    # undefined as written (the defs above are named `lowercase` and the
    # constants `UpperCAmelCase`).  TODO restore the intended names before
    # running this demo.
    UpperCAmelCase = transform(29_979_245)
    print("""Example of four vector: """)
    print(F'''ct\' = {four_vector[0]}''')
    print(F'''x\' = {four_vector[1]}''')
    print(F'''y\' = {four_vector[2]}''')
    print(F'''z\' = {four_vector[3]}''')
    # Substitute symbols with numerical values
    UpperCAmelCase = {ct: c, x: 1, y: 1, z: 1}
    UpperCAmelCase = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(F'''\n{numerical_vector}''')
| 256 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class snake_case ( __snake_case, unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Tuple = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : int=0)-> str:
'''simple docstring'''
__lowerCAmelCase: Tuple = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(a_))
__lowerCAmelCase: Optional[int] = np.random.RandomState(a_)
__lowerCAmelCase: List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''generator''': generator,
'''num_inference_steps''': 3,
'''strength''': 0.75,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : Union[str, Any])-> List[Any]:
'''simple docstring'''
__lowerCAmelCase: Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=a_)
__lowerCAmelCase: Optional[int] = self.get_dummy_inputs()
__lowerCAmelCase: List[Any] = pipe(**a_).images
__lowerCAmelCase: Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__lowerCAmelCase: str = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
assert np.abs(image_slice - expected_slice).max() < 1e-1
def lowercase_ ( self : Optional[Any])-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: int = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase: List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a_)
pipe.set_progress_bar_config(disable=a_)
__lowerCAmelCase: Union[str, Any] = self.get_dummy_inputs()
__lowerCAmelCase: Optional[int] = pipe(**a_).images
__lowerCAmelCase: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__lowerCAmelCase: Optional[Any] = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def lowercase_ ( self : Dict)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase: Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=a_)
# warmup pass to apply optimizations
__lowerCAmelCase: Optional[int] = pipe(**self.get_dummy_inputs())
__lowerCAmelCase: Optional[int] = self.get_dummy_inputs()
__lowerCAmelCase: Any = pipe(**a_).images
__lowerCAmelCase: Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__lowerCAmelCase: List[str] = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def lowercase_ ( self : Union[str, Any])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: List[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase: List[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=a_)
__lowerCAmelCase: Optional[int] = self.get_dummy_inputs()
__lowerCAmelCase: Any = pipe(**a_).images
__lowerCAmelCase: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_2_8, 1_2_8, 3)
__lowerCAmelCase: Dict = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def lowercase_ ( self : Any)-> Optional[Any]:
    '''CPU smoke test: ONNX img2img pipeline with the Euler-ancestral scheduler.

    Same structure as the other scheduler tests: swap the scheduler, run once,
    compare shape and a 3x3 corner slice within 1e-1.
    NOTE(review): identifier mangling — results are assigned to
    `__lowerCAmelCase` but read back as `pipe` / `image` / `image_slice` /
    `expected_slice`; this block cannot run as written.
    '''
    __lowerCAmelCase: Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
    __lowerCAmelCase: Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    pipe.set_progress_bar_config(disable=a_)
    __lowerCAmelCase: Union[str, Any] = self.get_dummy_inputs()
    __lowerCAmelCase: Optional[Any] = pipe(**a_).images
    __lowerCAmelCase: Dict = image[0, -3:, -3:, -1]
    assert image.shape == (1, 1_2_8, 1_2_8, 3)
    __lowerCAmelCase: Optional[Any] = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def lowercase_ ( self : List[str])-> Union[str, Any]:
    '''CPU smoke test: ONNX img2img pipeline with the DPM-Solver multistep scheduler.

    Checks the output shape and a 3x3 corner slice against a recorded
    reference within a 1e-1 tolerance.
    NOTE(review): identifier mangling — results are assigned to
    `__lowerCAmelCase` but read back as `pipe` / `image` / `image_slice` /
    `expected_slice`; this block cannot run as written.
    '''
    __lowerCAmelCase: Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
    __lowerCAmelCase: List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe.set_progress_bar_config(disable=a_)
    __lowerCAmelCase: Optional[int] = self.get_dummy_inputs()
    __lowerCAmelCase: int = pipe(**a_).images
    __lowerCAmelCase: Union[str, Any] = image[0, -3:, -3:, -1]
    assert image.shape == (1, 1_2_8, 1_2_8, 3)
    __lowerCAmelCase: Dict = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
    assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class snake_case ( unittest.TestCase ):
    # Nightly GPU integration tests for the ONNX Stable Diffusion img2img pipeline.
    # NOTE(review): every property/method below shares the mangled name
    # `lowercase_`, so later definitions shadow earlier ones on the class, and
    # results are assigned to `__lowerCAmelCase` while later lines read the
    # intended names (`options`, `init_image`, `pipe`, ...).
    @property
    def lowercase_ ( self : Union[str, Any])-> Union[str, Any]:
        '''ONNX Runtime CUDA execution-provider spec (arena capped at 15GB).'''
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def lowercase_ ( self : Optional[Any])-> Tuple:
        '''Session options with the memory-pattern optimization disabled.'''
        __lowerCAmelCase: int = ort.SessionOptions()
        __lowerCAmelCase: Dict = False
        return options
    def lowercase_ ( self : Any)-> List[Any]:
        '''Img2img on GPU with the default PNDM scheduler vs. a recorded slice.'''
        __lowerCAmelCase: Any = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        __lowerCAmelCase: Any = init_image.resize((7_6_8, 5_1_2))
        # using the PNDM scheduler by default
        __lowerCAmelCase: Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=a_ , feature_extractor=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=a_)
        __lowerCAmelCase: Tuple = '''A fantasy landscape, trending on artstation'''
        __lowerCAmelCase: List[Any] = np.random.RandomState(0)
        __lowerCAmelCase: int = pipe(
            prompt=a_ , image=a_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=1_0 , generator=a_ , output_type="np" , )
        __lowerCAmelCase: List[Any] = output.images
        __lowerCAmelCase: Dict = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert images.shape == (1, 5_1_2, 7_6_8, 3)
        __lowerCAmelCase: int = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def lowercase_ ( self : Optional[Any])-> Optional[Any]:
        '''Img2img on GPU with an explicitly-loaded LMS discrete scheduler.'''
        __lowerCAmelCase: Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        __lowerCAmelCase: List[Any] = init_image.resize((7_6_8, 5_1_2))
        __lowerCAmelCase: Any = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx")
        __lowerCAmelCase: List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=a_ , safety_checker=a_ , feature_extractor=a_ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=a_)
        __lowerCAmelCase: str = '''A fantasy landscape, trending on artstation'''
        __lowerCAmelCase: Union[str, Any] = np.random.RandomState(0)
        __lowerCAmelCase: List[str] = pipe(
            prompt=a_ , image=a_ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=2_0 , generator=a_ , output_type="np" , )
        __lowerCAmelCase: List[str] = output.images
        __lowerCAmelCase: Union[str, Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert images.shape == (1, 5_1_2, 7_6_8, 3)
        __lowerCAmelCase: Optional[int] = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 352 |
"""simple docstring"""
from math import ceil
def a__ ( device_map , num_blocks ) -> None:
    """Validate an attention-block ``device_map`` against a model with ``num_blocks`` blocks.

    Args:
        device_map: mapping from device id to the list of attention-block indices
            assigned to that device.
        num_blocks: total number of attention blocks the model has.

    Raises:
        ValueError: if any block is assigned to more than one device, if a block
            is missing from the map, or if the map references blocks the model
            does not have.
    """
    blocks = list(range(0, num_blocks))
    # Flatten all block indices referenced by the map.
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )
def a__ ( n_layers , devices ) -> dict:
    """Evenly spread ``n_layers`` layer indices over ``devices``.

    Layers are split into contiguous chunks of ``ceil(n_layers / len(devices))``;
    the last device may receive fewer layers when the split is uneven.

    Args:
        n_layers: total number of layers to distribute.
        devices: sequence of device identifiers.

    Returns:
        dict mapping each device to its list of layer indices.
    """
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
| 108 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase = {
'''configuration_upernet''': ['''UperNetConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
__UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 69 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class a (_lowerCAmelCase ):
    """Map-style dataset that applies a preprocessing function lazily.

    Wraps ``dataset`` and, on item access, runs ``process(item, **params)`` so
    preprocessing happens per element instead of up front.

    (Fix: the original ``__init__`` declared all three parameters under one
    mangled name — a SyntaxError — bound values to locals instead of instance
    attributes, and ``__getitem__`` indexed with an undefined name.)
    """

    def __init__(self, dataset, process, params):
        self.dataset = dataset  # underlying map-style dataset
        self.process = process  # callable applied to each raw item
        self.params = params    # extra keyword arguments for ``process``

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class a (_lowerCAmelCase ):
    """Iterator over a loader that applies `infer` to each batch and can
    "unroll" batched output into batch_size=1 items.

    NOTE(review): identifier mangling — values are assigned to local
    `__snake_case` names while later code reads the intended attribute names
    (`self.loader`, `self._loader_batch_index`, ...), the `__init__`
    signature declares one parameter name four times (a SyntaxError), and the
    iterator-protocol / helper methods were both renamed to `__snake_case`;
    this block cannot run as written.
    """
    def __init__( self : Union[str, Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict=None ) -> int:
        '''Store the loader, the inference callable and its params; a
        loader_batch_size of 1 disables the unrolling machinery entirely.'''
        __snake_case : List[Any] = loader
        __snake_case : Dict = infer
        __snake_case : Tuple = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            __snake_case : Union[str, Any] = None
        __snake_case : Optional[Any] = loader_batch_size
        # Internal bookkeeping
        __snake_case : int = None
        __snake_case : Optional[int] = None
    def __len__( self : Optional[Any] ) -> Tuple:
        return len(self.loader )
    def __iter__( self : str ) -> Tuple:
        __snake_case : int = iter(self.loader )
        return self
    def __snake_case ( self : int ) -> Any:
        '''Slice the current loader batch down to one batch_size=1 item.'''
        if isinstance(self._loader_batch_data , torch.Tensor ):
            # Batch data is simple tensor, just fetch the slice
            __snake_case : Union[str, Any] = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            __snake_case : int = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(lowerCamelCase , lowerCamelCase ):
                    # Convert ModelOutput to tuple first
                    __snake_case : Dict = element.to_tuple()
                    if isinstance(element[0] , torch.Tensor ):
                        __snake_case : Any = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        __snake_case : Optional[Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase , lowerCamelCase ):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0] , torch.Tensor ):
                        __snake_case : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
                    elif isinstance(element[0] , np.ndarray ):
                        __snake_case : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    __snake_case : Union[str, Any] = None
                elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    __snake_case : List[Any] = element[self._loader_batch_index].unsqueeze(0 )
                elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    __snake_case : Optional[Any] = np.expand_dims(element[self._loader_batch_index] , 0 )
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    __snake_case : Tuple = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            __snake_case : str = self._loader_batch_data.__class__(lowerCamelCase )
        self._loader_batch_index += 1
        return result
    def __snake_case ( self : Dict ) -> Union[str, Any]:
        '''Advance the iterator: unroll the current batch if one is pending,
        otherwise pull the next batch from the loader and run `infer` on it.'''
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        __snake_case : List[str] = next(self.iterator )
        __snake_case : int = self.infer(lowerCamelCase , **self.params )
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(lowerCamelCase , torch.Tensor ):
                __snake_case : List[Any] = processed
            else:
                __snake_case : Optional[Any] = list(processed.keys() )[0]
                __snake_case : List[Any] = processed[key]
            if isinstance(lowerCamelCase , lowerCamelCase ):
                __snake_case : List[str] = len(lowerCamelCase )
            else:
                __snake_case : Tuple = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                __snake_case : Optional[Any] = observed_batch_size
            # Setting internal index to unwrap the batch
            __snake_case : Union[str, Any] = processed
            __snake_case : str = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class a (_lowerCAmelCase ):
    """Pipeline iterator that flattens one-to-many preprocessing.

    Each element pulled from the underlying loader may expand (via ``infer``)
    into an iterator of several items; this class yields those items one by
    one, moving to the next loader element only when the current sub-iterator
    is exhausted — effectively flattening generators of generators.

    (Fix: the original ``__init__`` declared one mangled parameter name four
    times — a SyntaxError — `__iter__` bound its state to locals instead of
    ``self.iterator`` / ``self.subiterator``, and the iterator-protocol method
    had been renamed away from ``__next__``.)
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        # Chunk pipelines never unroll loader batches here, so the parent only
        # receives the three core arguments (loader_batch_size is accepted for
        # signature compatibility but unused).
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # First call for the current loader element: build its sub-iterator.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class a (_lowerCAmelCase ):
    """Iterator that regroups chunked pipeline items: it accumulates items
    (unbatching loader batches when needed) until one carries `is_last`, then
    returns the accumulated list so `process`/`postprocess` see the original
    element boundaries again.

    NOTE(review): identifier mangling — results are assigned to `__snake_case`
    while later code reads `item`, `processed`, `is_last`, `accumulator`,
    etc., and the iterator-protocol method was renamed to `__snake_case`;
    this block cannot run as written.
    """
    def __iter__( self : Any ) -> Optional[Any]:
        __snake_case : str = iter(self.loader )
        return self
    def __snake_case ( self : Tuple ) -> str:
        '''Collect items until an `is_last` marker, then return them as one list.'''
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        __snake_case : Dict = False
        __snake_case : Dict = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                __snake_case : Union[str, Any] = self.loader_batch_item()
                __snake_case : Any = item.pop("is_last" )
                accumulator.append(lowerCamelCase )
                if is_last:
                    return accumulator
        while not is_last:
            __snake_case : str = self.infer(next(self.iterator ) , **self.params )
            if self.loader_batch_size is not None:
                if isinstance(lowerCamelCase , torch.Tensor ):
                    __snake_case : Optional[int] = processed
                else:
                    __snake_case : Union[str, Any] = list(processed.keys() )[0]
                    __snake_case : Optional[Any] = processed[key]
                if isinstance(lowerCamelCase , lowerCamelCase ):
                    __snake_case : int = len(lowerCamelCase )
                else:
                    __snake_case : int = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    __snake_case : Dict = observed_batch_size
                __snake_case : Union[str, Any] = processed
                __snake_case : List[str] = 0
                while self._loader_batch_index < self.loader_batch_size:
                    __snake_case : str = self.loader_batch_item()
                    __snake_case : str = item.pop("is_last" )
                    accumulator.append(lowerCamelCase )
                    if is_last:
                        return accumulator
            else:
                __snake_case : List[str] = processed
                __snake_case : Tuple = item.pop("is_last" )
                accumulator.append(lowerCamelCase )
        return accumulator
class a (_lowerCAmelCase ):
    """Dataset view that extracts a single key from each underlying item.

    (Fix: the original bound constructor arguments to locals instead of
    instance attributes and ``__getitem__`` indexed with an undefined ``i``.)
    """

    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset  # underlying map-style dataset of dict items
        self.key = key          # dictionary key to pull out of every item

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class a (_lowerCAmelCase ):
    """Dataset view that pairs two keys of each item as ``text`` / ``text_pair``.

    (Fix: the original declared one mangled parameter name three times — a
    SyntaxError — and collapsed both key attributes into a single ``keya``,
    losing the second key entirely.)
    """

    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset  # underlying map-style dataset of dict items
        self.key1 = key1        # key for the first sequence
        self.key2 = key2        # key for the second sequence

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 123 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
UpperCAmelCase__ = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
    """A single raw example for token classification.

    Field names are grounded in the reads inside the feature converter later
    in this file (``example.guid`` / ``example.words`` / ``example.labels``).
    (Fix: ``@dataclass`` over three unannotated ``__a = 42`` class variables
    produced a dataclass with no fields at all.)
    """

    guid: str                     # unique id for the example
    words: List[str]              # whitespace-split words of the sequence
    labels: Optional[List[str]]   # per-word labels; None at inference time


# The rest of this file annotates with this name (e.g. ``List[InputExample]``).
InputExample = lowerCAmelCase__
@dataclass
class lowerCAmelCase__ :
    """Model-ready features for one example.

    Field/keyword names are grounded in the ``InputFeatures(...)`` construction
    inside the feature converter later in this file.
    (Fix: ``@dataclass`` over unannotated ``__a`` class variables produced a
    dataclass with no fields at all.)
    """

    input_ids: List[int]                        # token ids, padded to max length
    attention_mask: List[int]                   # 1 for real tokens, 0 for padding
    token_type_ids: Optional[List[int]] = None  # segment ids; None if model has none
    label_ids: Optional[List[int]] = None       # per-token label ids


# The feature converter in this file constructs this class as ``InputFeatures``.
InputFeatures = lowerCAmelCase__
class lowerCAmelCase__ ( A_ ):
    """Dataset split selector (``A_`` is presumably an Enum base — confirm).

    (Fix: all three members were mangled to the single name ``__a``, so only
    the last assignment survived; member names are restored from the values
    and from the ``Split.train`` default used later in this file.)
    """

    train = "train"
    dev = "dev"
    test = "test"


# The rest of this file refers to this class as ``Split`` (e.g. ``Split.train``).
Split = lowerCAmelCase__
class lowerCAmelCase__ :
    """Base hooks for a token-classification task plus the shared feature converter.

    Subclasses implement how raw files are read and which label set is used.
    (Fix: all three static methods shared the single mangled name ``lowercase``,
    so only the last survived; the names below are restored from the call sites
    later in this file, and every assignment target is restored from where it
    is read back.)
    """

    @staticmethod
    def read_examples_from_file(data_dir, mode):
        """Read raw examples for the given split from ``data_dir`` (subclass hook)."""
        raise NotImplementedError

    @staticmethod
    def get_labels(path):
        """Return the task's label list (subclass hook; this name is not
        referenced in this chunk — restored from the NER-utils convention)."""
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples,
        label_list,
        max_seq_length,
        tokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ):
        """Tokenize each example, align labels to sub-tokens, add special tokens
        and pad/truncate to ``max_seq_length``.

        Returns:
            list of ``InputFeatures`` (one per example).
        """
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features


# The dataset classes below annotate parameters as ``TokenClassificationTask``.
TokenClassificationTask = lowerCAmelCase__
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowerCAmelCase__ ( Dataset ):
    """PyTorch ``Dataset`` of cached token-classification features.

    Converts every example under ``data_dir`` into padded ``InputFeatures``
    (or loads them from an on-disk cache) and serves them by index.
    (Fix: the original bound the features to a local instead of
    ``self.features``, which ``torch.save``, ``__len__`` and ``__getitem__``
    read; the base class restored from the ``from torch.utils.data import
    Dataset`` guard just above.)
    """

    # Label id ignored by the loss so padded tokens never contribute.
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

    def __init__(
        self,
        token_classification_task: TokenClassificationTask,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        labels: List[str],
        model_type: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        mode: Split = Split.train,
    ):
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = token_classification_task.read_examples_from_file(data_dir, mode)
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples,
                    labels,
                    max_seq_length,
                    tokenizer,
                    cls_token_at_end=bool(model_type in ["xlnet"]),
                    cls_token=tokenizer.cls_token,
                    cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                    sep_token=tokenizer.sep_token,
                    # NOTE(review): mangled in the original; restored per the
                    # "roberta uses an extra separator" convention — confirm.
                    sep_token_extra=bool(model_type in ["roberta"]),
                    pad_on_left=bool(tokenizer.padding_side == "left"),
                    pad_token=tokenizer.pad_token_id,
                    pad_token_segment_id=tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info(f"Saving features into cached file {cached_features_file}")
                torch.save(self.features, cached_features_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase__ :
    """TensorFlow dataset of token-classification features, exposed through
    `tf.data.Dataset.from_generator`.

    NOTE(review): identifier mangling — the two unannotated `__a` class
    variables were presumably the `features` / `pad_token_label_id` field
    annotations, and results are bound to local `_snake_case` names while
    `gen()`, `lowercase()` and the dunder methods read `self.features` /
    `self.dataset`, which are never assigned; this block cannot run as
    written.
    """
    __a = 42
    __a = -100
    def __init__( self : str , _lowerCamelCase : TokenClassificationTask , _lowerCamelCase : str , _lowerCamelCase : PreTrainedTokenizer , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : Optional[int] = None , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Split = Split.train , ):
        '''Convert examples to features, then wrap them in a tf.data generator
        dataset whose signature depends on whether the tokenizer emits
        token_type_ids.'''
        _snake_case = token_classification_task.read_examples_from_file(_lowerCamelCase , _lowerCamelCase )
        # TODO clean up all this to leverage built-in features of tokenizers
        _snake_case = token_classification_task.convert_examples_to_features(
            _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowerCamelCase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
        def gen():
            # Yield (inputs, labels) pairs in the shape from_generator expects.
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )
        if "token_type_ids" not in tokenizer.model_input_names:
            _snake_case = tf.data.Dataset.from_generator(
                _lowerCamelCase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
                    {'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
                    tf.TensorShape([None] ),
                ) , )
        else:
            _snake_case = tf.data.Dataset.from_generator(
                _lowerCamelCase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
                    {
                        '''input_ids''': tf.TensorShape([None] ),
                        '''attention_mask''': tf.TensorShape([None] ),
                        '''token_type_ids''': tf.TensorShape([None] ),
                    },
                    tf.TensorShape([None] ),
                ) , )
    def lowercase ( self : Optional[int] ):
        '''Return the tf.data dataset with its cardinality asserted.'''
        _snake_case = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
        return self.dataset
    def __len__( self : Any ):
        return len(self.features )
    def __getitem__( self : List[Any] , _lowerCamelCase : Tuple ):
        return self.features[i]
| 40 |
"""simple docstring"""
from __future__ import annotations
class lowerCAmelCase__ :
    """XOR stream cipher over single characters (encryption and decryption are
    the same operation for XOR).

    NOTE(review): mangling artifacts — every method is named `lowercase`, so
    only the last definition survives on the class; several signatures declare
    `_lowerCamelCase` twice (a SyntaxError); and `__init__` stores the key in
    a local (`_snake_case`) instead of `self.__key`, which every method later
    reads. This block cannot run as written.
    """
    def __init__( self : Optional[int] , _lowerCamelCase : int = 0 ):
        # NOTE(review): intended as `self.__key = key` — as written the key is lost.
        _snake_case = key
    def lowercase ( self : Tuple , _lowerCamelCase : str , _lowerCamelCase : int ):
        '''XOR each character of `content` with `key`; returns a list of chars.'''
        assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
        _snake_case = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(_lowerCamelCase ) ^ key ) for ch in content]
    def lowercase ( self : List[Any] , _lowerCamelCase : str , _lowerCamelCase : int ):
        '''Inverse of the method above; identical because XOR is an involution.'''
        assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
        _snake_case = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(_lowerCamelCase ) ^ key ) for ch in content]
    def lowercase ( self : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int = 0 ):
        '''XOR-encrypt `content`, returning a single string.'''
        assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
        _snake_case = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        _snake_case = ''''''
        for ch in content:
            ans += chr(ord(_lowerCamelCase ) ^ key )
        return ans
    def lowercase ( self : List[str] , _lowerCamelCase : str , _lowerCamelCase : int = 0 ):
        '''Inverse of the string method above; identical for XOR.'''
        assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
        _snake_case = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        _snake_case = ''''''
        for ch in content:
            ans += chr(ord(_lowerCamelCase ) ^ key )
        return ans
    def lowercase ( self : List[Any] , _lowerCamelCase : str , _lowerCamelCase : int = 0 ):
        '''Encrypt a file line-by-line into `encrypt.out`; returns success flag.'''
        assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
        try:
            with open(_lowerCamelCase ) as fin, open('''encrypt.out''' , '''w+''' ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(_lowerCamelCase , _lowerCamelCase ) )
        except OSError:
            return False
        return True
    def lowercase ( self : Optional[Any] , _lowerCamelCase : str , _lowerCamelCase : int ):
        '''Decrypt a file line-by-line into `decrypt.out`; returns success flag.'''
        assert isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(_lowerCamelCase , _lowerCamelCase )
        try:
            with open(_lowerCamelCase ) as fin, open('''decrypt.out''' , '''w+''' ) as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(_lowerCamelCase , _lowerCamelCase ) )
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 40 | 1 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
    # Slow Flax ControlNet integration tests (canny-edge and openpose
    # conditioning) that shard inference across all available devices.
    # NOTE(review): identifier mangling — results are assigned to
    # `UpperCAmelCase` while later lines read the intended names
    # (`controlnet`, `pipe`, `prompts`, `images`, ...).
    def __magic_name__ ( self : Tuple ):
        '''Free memory between tests.'''
        super().tearDown()
        gc.collect()
    def __magic_name__ ( self : List[str] ):
        '''ControlNet canny-edge conditioning: generate from a bird edge map and
        compare a 3x3 output slice to a recorded reference within 1e-2.'''
        UpperCAmelCase : Dict = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-canny''', from_pt=_snake_case, dtype=jnp.bfloataa )
        UpperCAmelCase : Optional[int] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''', controlnet=_snake_case, from_pt=_snake_case, dtype=jnp.bfloataa )
        UpperCAmelCase : Union[str, Any] = controlnet_params
        UpperCAmelCase : Optional[Any] = '''bird'''
        UpperCAmelCase : str = jax.device_count()
        UpperCAmelCase : Union[str, Any] = pipe.prepare_text_inputs([prompts] * num_samples )
        UpperCAmelCase : Optional[int] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' )
        UpperCAmelCase : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
        UpperCAmelCase : str = jax.random.PRNGKey(0 )
        UpperCAmelCase : Dict = jax.random.split(_snake_case, jax.device_count() )
        # Replicate params and shard inputs so each device runs one sample.
        UpperCAmelCase : Any = replicate(_snake_case )
        UpperCAmelCase : List[str] = shard(_snake_case )
        UpperCAmelCase : Dict = shard(_snake_case )
        UpperCAmelCase : Optional[Any] = pipe(
            prompt_ids=_snake_case, image=_snake_case, params=_snake_case, prng_seed=_snake_case, num_inference_steps=5_0, jit=_snake_case, ).images
        assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
        UpperCAmelCase : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        UpperCAmelCase : Optional[int] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        UpperCAmelCase : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        UpperCAmelCase : Any = jnp.array(
            [0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def __magic_name__ ( self : List[Any] ):
        '''ControlNet openpose conditioning: generate from a pose map and
        compare a 3x3 output slice to a recorded reference within 1e-2.'''
        UpperCAmelCase : int = FlaxControlNetModel.from_pretrained(
            '''lllyasviel/sd-controlnet-openpose''', from_pt=_snake_case, dtype=jnp.bfloataa )
        UpperCAmelCase : int = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''', controlnet=_snake_case, from_pt=_snake_case, dtype=jnp.bfloataa )
        UpperCAmelCase : Optional[Any] = controlnet_params
        UpperCAmelCase : Optional[Any] = '''Chef in the kitchen'''
        UpperCAmelCase : Union[str, Any] = jax.device_count()
        UpperCAmelCase : List[str] = pipe.prepare_text_inputs([prompts] * num_samples )
        UpperCAmelCase : Union[str, Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png''' )
        UpperCAmelCase : Tuple = pipe.prepare_image_inputs([pose_image] * num_samples )
        UpperCAmelCase : Dict = jax.random.PRNGKey(0 )
        UpperCAmelCase : Optional[int] = jax.random.split(_snake_case, jax.device_count() )
        UpperCAmelCase : Any = replicate(_snake_case )
        UpperCAmelCase : Optional[int] = shard(_snake_case )
        UpperCAmelCase : List[str] = shard(_snake_case )
        UpperCAmelCase : str = pipe(
            prompt_ids=_snake_case, image=_snake_case, params=_snake_case, prng_seed=_snake_case, num_inference_steps=5_0, jit=_snake_case, ).images
        assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3)
        UpperCAmelCase : Optional[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        UpperCAmelCase : Dict = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        UpperCAmelCase : Union[str, Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        UpperCAmelCase : Tuple = jnp.array(
            [[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 336 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def __UpperCAmelCase ( __lowerCamelCase ) -> Optional[int]:
if "model" in orig_key:
lowercase__ : Tuple = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
lowercase__ : List[str] = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
lowercase__ : Union[str, Any] = orig_key.split('''.''' )[0].split('''_''' )[-1]
lowercase__ : List[str] = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
lowercase__ : Union[str, Any] = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
lowercase__ : str = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
lowercase__ : Any = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
lowercase__ : List[Any] = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
lowercase__ : Any = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
lowercase__ : Optional[int] = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
lowercase__ : List[str] = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
lowercase__ : int = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
lowercase__ : Optional[Any] = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
lowercase__ : Optional[Any] = '''yoso.''' + orig_key
return orig_key
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Optional[int]:
    # Rewrites a raw YOSO state dict into HuggingFace naming and adds the
    # tied bias / position-id tensors.
    # NOTE(review): broken as written — both parameters share the name
    # ``__lowerCamelCase`` (a SyntaxError in Python), every popped value is
    # bound to a throwaway ``lowercase__`` name (``val``/``num``/... are never
    # defined), and the renamed key is never written back into the dict.
    # Upstream this is ``convert_checkpoint_helper(max_position_embeddings,
    # orig_state_dict)`` which stores ``orig_state_dict[rename_key(key)] = val``.
    for key in orig_state_dict.copy().keys():
        # Drop classification-head tensors that YosoForMaskedLM does not use.
        lowercase__ : Optional[Any] = orig_state_dict.pop(__lowerCamelCase )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            lowercase__ : Tuple = val
    # Tie the prediction bias and materialise position ids (offset by 2).
    lowercase__ : Union[str, Any] = orig_state_dict['''cls.predictions.decoder.bias''']
    lowercase__ : List[str] = torch.arange(__lowerCamelCase ).expand((1, -1) ) + 2
    return orig_state_dict
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Optional[Any]:
    # Convert a raw YOSO checkpoint into a saved HuggingFace model directory.
    # NOTE(review): the three parameters all share one name (a SyntaxError),
    # and every intermediate result is bound to the throwaway ``lowercase__``
    # name while later lines read ``config``/``model``/``pytorch_dump_path``.
    # Upstream this is ``convert_yoso_checkpoint(checkpoint_path,
    # yoso_config_file, pytorch_dump_path)`` — restore those identifiers.
    lowercase__ : Tuple = torch.load(__lowerCamelCase , map_location='''cpu''' )['''model_state_dict''']
    lowercase__ : List[Any] = YosoConfig.from_json_file(__lowerCamelCase )
    lowercase__ : List[Any] = YosoForMaskedLM(__lowerCamelCase )
    lowercase__ : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , __lowerCamelCase )
    # ``load_state_dict`` returns the missing/unexpected-keys report — print it.
    print(model.load_state_dict(__lowerCamelCase ) )
    model.eval()
    model.save_pretrained(__lowerCamelCase )
    print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
    # CLI entry point: three required paths (input checkpoint, config JSON,
    # output model directory).
    # NOTE(review): ``convert_yoso_checkpoint`` is not defined under that name
    # in this file (the converter above was mangled to ``__UpperCAmelCase``),
    # and ``parser``/``args`` are read although the results were bound to
    # ``lowerCAmelCase_`` — this driver raises NameError as written.
    lowerCAmelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The json file for YOSO model config.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    lowerCAmelCase_ = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 16 | 0 |
from statistics import mean
import numpy as np
def A__ ( process_name , arrival_time , burst_time , no_of_process ) -> list:
    """Schedule processes with Highest Response Ratio Next (HRRN).

    Fix: the original declared all four parameters with one name (a
    SyntaxError) and bound every local to the placeholder ``__snake_case``
    while the logic read the real names — restored working identifiers.

    Args:
        process_name: names of the processes (reordered locally by arrival).
        arrival_time: arrival time of each process; NOTE: sorted in place.
        burst_time: CPU burst length of each process.
        no_of_process: number of processes.

    Returns:
        Turn-around time of each process, indexed in arrival order.
    """
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        # Find the first unfinished process; fast-forward the clock if
        # nothing has arrived yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        temp = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        response_ratio = 0
        for i in range(0 , no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                # Response ratio = (waiting time + burst time) / burst time.
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
def A__ ( process_name , turn_around_time , burst_time , no_of_process ) -> list:
    """Return the waiting time of each process: turn-around minus burst time.

    Fix: the original declared all four parameters with one name (a
    SyntaxError) and never actually defined the returned ``waiting_time``
    list — restored working identifiers.  ``process_name`` is unused but kept
    for signature parity with the caller.
    """
    waiting_time = [0] * no_of_process
    for i in range(0 , no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    # Demo run on five processes A..E with identical arrival and burst lists,
    # printing a per-process table plus the two averages.
    # NOTE(review): ``calculate_turn_around_time`` / ``calculate_waiting_time``
    # are not defined under those names here — both helpers above were mangled
    # to ``A__`` — and the results are bound to ``__UpperCAmelCase`` while the
    # prints read the original names, so this driver raises NameError as written.
    __UpperCAmelCase : Optional[int] = 5
    __UpperCAmelCase : Tuple = ["A", "B", "C", "D", "E"]
    __UpperCAmelCase : int = [1, 2, 3, 4, 5]
    __UpperCAmelCase : Optional[Any] = [1, 2, 3, 4, 5]
    __UpperCAmelCase : Dict = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    __UpperCAmelCase : str = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
    print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
    for i in range(0, no_of_process):
        print(
            f'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
            f'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
        )
    print(f'average waiting time : {mean(waiting_time):.5f}')
    print(f'average turn around time : {mean(turn_around_time):.5f}')
| 293 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __snake_case ( __lowerCamelCase ):
    '''simple docstring'''

    # Config smoke test: a MobileViT config built from the tester inputs must
    # expose these attributes.
    # NOTE(review): mangled identifiers — the base-class placeholder
    # ``__lowerCamelCase`` should be ``ConfigTester`` and the ``A`` passed to
    # ``hasattr`` should be the config built on the first line.
    def UpperCAmelCase__ ( self : Optional[int] ):
        __snake_case: Optional[int] = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(A , """hidden_sizes""" ) )
        self.parent.assertTrue(hasattr(A , """neck_hidden_sizes""" ) )
        self.parent.assertTrue(hasattr(A , """num_attention_heads""" ) )
class __snake_case :
    '''simple docstring'''

    # Builds tiny MobileViT configs and random inputs for the unit tests below
    # (upstream: ``MobileViTModelTester``).
    # NOTE(review): mangled identifiers — every parameter is named ``A``
    # (duplicate parameter names are a SyntaxError) and every local is bound
    # to ``__snake_case`` while later lines read the original names
    # (``parent``, ``batch_size``, ...); restore upstream names before running.
    def __init__( self : int , A : str , A : Dict=13 , A : str=32 , A : Any=2 , A : Optional[Any]=3 , A : str=640 , A : Tuple=4 , A : Dict="silu" , A : List[Any]=3 , A : Any=32 , A : Any=0.1 , A : int=0.1 , A : Dict=0.1 , A : Optional[Any]=0.02 , A : List[Any]=True , A : Tuple=True , A : Any=10 , A : Optional[int]=None , ):
        __snake_case: List[Any] = parent
        __snake_case: Dict = batch_size
        __snake_case: int = image_size
        __snake_case: Tuple = patch_size
        __snake_case: Tuple = num_channels
        __snake_case: str = last_hidden_size
        __snake_case: Dict = num_attention_heads
        __snake_case: Dict = hidden_act
        __snake_case: Tuple = conv_kernel_size
        __snake_case: List[str] = output_stride
        __snake_case: List[str] = hidden_dropout_prob
        __snake_case: Optional[Any] = attention_probs_dropout_prob
        __snake_case: int = classifier_dropout_prob
        __snake_case: List[Any] = use_labels
        __snake_case: Union[str, Any] = is_training
        __snake_case: Union[str, Any] = num_labels
        __snake_case: str = initializer_range
        __snake_case: List[Any] = scope

    def UpperCAmelCase__ ( self : List[Any] ):
        # Random pixel values plus (optionally) classification and per-pixel labels.
        __snake_case: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __snake_case: Tuple = None
        __snake_case: Any = None
        if self.use_labels:
            __snake_case: Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
            __snake_case: str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        __snake_case: Any = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def UpperCAmelCase__ ( self : int ):
        return MobileViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )

    def UpperCAmelCase__ ( self : str , A : Optional[Any] , A : Any , A : Any , A : Union[str, Any] ):
        # Bare backbone: output is downsampled by ``output_stride``.
        __snake_case: List[Any] = MobileViTModel(config=A )
        model.to(A )
        model.eval()
        __snake_case: int = model(A )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def UpperCAmelCase__ ( self : str , A : List[Any] , A : Any , A : Any , A : int ):
        # Image-classification head: logits of shape (batch, num_labels).
        __snake_case: str = self.num_labels
        __snake_case: Optional[int] = MobileViTForImageClassification(A )
        model.to(A )
        model.eval()
        __snake_case: Union[str, Any] = model(A , labels=A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def UpperCAmelCase__ ( self : Optional[int] , A : str , A : Optional[Any] , A : int , A : str ):
        # Segmentation head: checked both without and with labels.
        __snake_case: List[Any] = self.num_labels
        __snake_case: Dict = MobileViTForSemanticSegmentation(A )
        model.to(A )
        model.eval()
        __snake_case: Union[str, Any] = model(A )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        __snake_case: Tuple = model(A , labels=A )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def UpperCAmelCase__ ( self : Dict ):
        # Repack config/inputs into the dict shape the common tests expect.
        __snake_case: Tuple = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case , __snake_case: Any = config_and_inputs
        __snake_case: Optional[int] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
    '''simple docstring'''

    # Common-model-test harness for MobileViT (backbone, image-classification
    # and semantic-segmentation heads).
    # NOTE(review): mangled identifiers — the mixin bases became
    # ``__lowerCamelCase`` (upstream: ModelTesterMixin, PipelineTesterMixin),
    # method parameters became ``A`` and locals ``__snake_case`` while later
    # lines read the original names; restore before running.
    lowerCAmelCase__ = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    lowerCAmelCase__ = (
        {
            """feature-extraction""": MobileViTModel,
            """image-classification""": MobileViTForImageClassification,
            """image-segmentation""": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Generic checks disabled for this architecture (no attentions,
    # no input embeddings, etc. — see the skip reasons below).
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False

    def UpperCAmelCase__ ( self : List[str] ):
        __snake_case: List[Any] = MobileViTModelTester(self )
        __snake_case: str = MobileViTConfigTester(self , config_class=A , has_text_modality=A )

    def UpperCAmelCase__ ( self : str ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
    def UpperCAmelCase__ ( self : List[Any] ):
        pass

    @unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
    def UpperCAmelCase__ ( self : Dict ):
        pass

    @unittest.skip(reason="""MobileViT does not output attentions""" )
    def UpperCAmelCase__ ( self : Optional[Any] ):
        pass

    def UpperCAmelCase__ ( self : str ):
        # The forward signature of every model class must start with
        # ``pixel_values``.
        __snake_case , __snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case: Optional[Any] = model_class(A )
            __snake_case: int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case: Optional[int] = [*signature.parameters.keys()]
            __snake_case: List[Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , A )

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def UpperCAmelCase__ ( self : Optional[int] ):
        pass

    def UpperCAmelCase__ ( self : Dict ):
        __snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A )

    def UpperCAmelCase__ ( self : Dict ):
        # Hidden-state checks: 5 stages, each halving the spatial resolution.
        def check_hidden_states_output(A : List[Any] , A : int , A : Tuple ):
            __snake_case: List[str] = model_class(A )
            model.to(A )
            model.eval()
            with torch.no_grad():
                __snake_case: str = model(**self._prepare_for_class(A , A ) )
            __snake_case: Optional[int] = outputs.hidden_states
            __snake_case: Any = 5
            self.assertEqual(len(A ) , A )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            __snake_case: Union[str, Any] = 2
            for i in range(len(A ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )

        __snake_case , __snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case: Optional[Any] = True
            check_hidden_states_output(A , A , A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __snake_case: Dict = True
            check_hidden_states_output(A , A , A )

    def UpperCAmelCase__ ( self : int ):
        __snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A )

    def UpperCAmelCase__ ( self : Union[str, Any] ):
        __snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*A )

    @slow
    def UpperCAmelCase__ ( self : Union[str, Any] ):
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case: List[Any] = MobileViTModel.from_pretrained(A )
            self.assertIsNotNone(A )
def A__ ( ) -> Optional[int]:
    """Load the COCO cats fixture image used by the integration tests below.

    Fix: the original bound the opened image to a throwaway ``__snake_case``
    name and then returned the undefined name ``image`` (NameError).
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
    '''simple docstring'''

    # Slow integration tests against the pretrained MobileViT checkpoints,
    # comparing logits / segmentation outputs to 1e-4 tolerance.
    # NOTE(review): method parameters are mangled to ``A`` and locals to
    # ``__snake_case`` while later lines read the original names
    # (``model``, ``outputs``, ...); the reference values themselves look
    # like faithful copies of the upstream test.
    @cached_property
    def UpperCAmelCase__ ( self : Dict ):
        return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None

    @slow
    def UpperCAmelCase__ ( self : List[Any] ):
        # Image-classification head: 1000-way logits on the fixture image.
        __snake_case: Tuple = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(A )
        __snake_case: str = self.default_image_processor
        __snake_case: Optional[Any] = prepare_img()
        __snake_case: List[Any] = image_processor(images=A , return_tensors="""pt""" ).to(A )
        # forward pass
        with torch.no_grad():
            __snake_case: Dict = model(**A )
        # verify the logits
        __snake_case: List[str] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , A )
        __snake_case: Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(A )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )

    @slow
    def UpperCAmelCase__ ( self : Tuple ):
        # DeepLabV3 segmentation head: 21-class logits at 32x32.
        __snake_case: Tuple = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        __snake_case: List[str] = model.to(A )
        __snake_case: Dict = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        __snake_case: List[Any] = prepare_img()
        __snake_case: List[str] = image_processor(images=A , return_tensors="""pt""" ).to(A )
        # forward pass
        with torch.no_grad():
            __snake_case: List[Any] = model(**A )
        __snake_case: Optional[int] = outputs.logits
        # verify the logits
        __snake_case: Dict = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , A )
        __snake_case: Optional[int] = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ] , device=A , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1E-4 ) )

    @slow
    def UpperCAmelCase__ ( self : Dict ):
        # Post-processing: segmentation maps with and without ``target_sizes``.
        __snake_case: int = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        __snake_case: str = model.to(A )
        __snake_case: Optional[Any] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        __snake_case: List[str] = prepare_img()
        __snake_case: Optional[int] = image_processor(images=A , return_tensors="""pt""" ).to(A )
        # forward pass
        with torch.no_grad():
            __snake_case: Dict = model(**A )
        __snake_case: List[Any] = outputs.logits.detach().cpu()
        __snake_case: List[str] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(50, 60)] )
        __snake_case: str = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , A )
        __snake_case: int = image_processor.post_process_semantic_segmentation(outputs=A )
        __snake_case: Tuple = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , A )
| 293 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
# Input data for the Banker's algorithm demo below.
# NOTE(review): all three constants share the single name ``a`` (as does the
# class that follows), so only the last binding survives — upstream these are
# ``claim_vector``, ``allocated_resources_table`` and ``maximum_claim_table``.
a : Dict = [8, 5, 9, 7]
# Resources currently allocated to each of the five processes (rows).
a : int = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
# Maximum claim each process may ever request (rows).
a : int = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class a :
    # Banker's algorithm deadlock-avoidance demo.
    # NOTE(review): every method was mangled to ``A_`` so later definitions
    # shadow earlier ones in the class dict, and the name-mangled
    # ``self.__need`` / ``self.__available_resources`` lookups therefore
    # cannot resolve; ``__init__`` also reads ``claim_vector`` etc. that were
    # bound to the throwaway ``snake_case_``.  Restore the upstream
    # ``BankersAlgorithm`` method names before relying on this class.
    def __init__( self : int , lowercase_ : list[int] , lowercase_ : list[list[int]] , lowercase_ : list[list[int]] , ):
        snake_case_ = claim_vector
        snake_case_ = allocated_resources_table
        snake_case_ = maximum_claim_table

    def A_ ( self : Union[str, Any] ):
        # Column-wise sum of currently allocated resources.
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]

    def A_ ( self : List[Any] ):
        # Available vector = claim vector - total currently allocated.
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )

    def A_ ( self : List[str] ):
        # Need matrix: maximum claim minus current allocation, per process.
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(lowercase_ ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]

    def A_ ( self : Tuple ):
        # Map each need row back to its original process index.
        return {self.__need().index(lowercase_ ): i for i in self.__need()}

    def A_ ( self : Optional[Any] , **lowercase_ : Dict ):
        # Safety algorithm: repeatedly execute any process whose remaining
        # need fits within the available vector, freeing its allocation when
        # it completes; abort if no process can proceed.
        snake_case_ = self.__need()
        snake_case_ = self.__allocated_resources_table
        snake_case_ = self.__available_resources()
        snake_case_ = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print('''_''' * 50 + '''\n''' )
        while need_list:
            snake_case_ = False
            for each_need in need_list:
                snake_case_ = True
                for index, need in enumerate(lowercase_ ):
                    if need > available_resources[index]:
                        snake_case_ = False
                        break
                if execution:
                    snake_case_ = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            snake_case_ = original_need_index
                    print(F"Process {process_number + 1} is executing." )
                    # remove the process run from stack
                    need_list.remove(lowercase_ )
                    # update available/freed resources stack
                    snake_case_ = np.array(lowercase_ ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        '''Updated available resource stack for processes: '''
                        + ''' '''.join([str(lowercase_ ) for x in available_resources] ) )
                    break
            if safe:
                print('''The process is in a safe state.\n''' )
            else:
                print('''System in unsafe state. Aborting...\n''' )
                break

    def A_ ( self : List[Any] ):
        # Pretty-print the allocation / claim tables and the initial vectors.
        print(''' ''' * 9 + '''Allocated Resource Table''' )
        for item in self.__allocated_resources_table:
            print(
                F"P{self.__allocated_resources_table.index(lowercase_ ) + 1}"
                + ''' '''.join(F"{it:>8}" for it in item )
                + '''\n''' )
        print(''' ''' * 9 + '''System Resource Table''' )
        for item in self.__maximum_claim_table:
            print(
                F"P{self.__maximum_claim_table.index(lowercase_ ) + 1}"
                + ''' '''.join(F"{it:>8}" for it in item )
                + '''\n''' )
        print(
            '''Current Usage by Active Processes: '''
            + ''' '''.join(str(lowercase_ ) for x in self.__claim_vector ) )
        print(
            '''Initial Available Resources: '''
            + ''' '''.join(str(lowercase_ ) for x in self.__available_resources() ) )
        time.sleep(1 )
if __name__ == "__main__":
    # Run the docstring examples in this module as tests.
    import doctest

    doctest.testmod()
| 56 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class a ( tf.keras.optimizers.schedules.LearningRateSchedule ):
    # Linear-polynomial warmup wrapped around a decay schedule
    # (upstream: ``WarmUp``).
    # NOTE(review): all ``__init__`` parameters share the mangled name
    # ``lowercase_`` (a SyntaxError) while the bodies read the original names
    # (``initial_learning_rate`` etc.) — restore the upstream signature.
    def __init__( self : Optional[Any] , lowercase_ : float , lowercase_ : Callable , lowercase_ : int , lowercase_ : float = 1.0 , lowercase_ : str = None , ):
        super().__init__()
        snake_case_ = initial_learning_rate
        snake_case_ = warmup_steps
        snake_case_ = power
        snake_case_ = decay_schedule_fn
        snake_case_ = name

    def __call__( self : Tuple , lowercase_ : str ):
        with tf.name_scope(self.name or '''WarmUp''' ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            snake_case_ = tf.cast(lowercase_ , tf.floataa )
            snake_case_ = tf.cast(self.warmup_steps , tf.floataa )
            snake_case_ = global_step_float / warmup_steps_float
            snake_case_ = self.initial_learning_rate * tf.math.pow(lowercase_ , self.power )
            # Warmup LR before `warmup_steps`; the wrapped decay schedule after.
            return tf.cond(
                global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase_ , )

    def A_ ( self : Any ):
        # Keras serialization support.
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = 0.0, __UpperCAmelCase = 0.9, __UpperCAmelCase = 0.9_9_9, __UpperCAmelCase = 1e-8, __UpperCAmelCase = None, __UpperCAmelCase = None, __UpperCAmelCase = 0.0, __UpperCAmelCase = 1.0, __UpperCAmelCase = None, ) -> List[str]:
    '''simple docstring'''
    # Builds (optimizer, lr_schedule): polynomial decay, optional linear
    # warmup, and AdamWeightDecay when weight_decay_rate > 0
    # (upstream: ``create_optimizer``).
    # NOTE(review): unrunnable as written — all parameters share one mangled
    # name (a SyntaxError) and the ``beta_a=`` keyword is passed twice below
    # (another SyntaxError); upstream those are the distinct ``beta_1`` /
    # ``beta_2`` keywords.
    snake_case_ = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=__UpperCAmelCase, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=__UpperCAmelCase, )
    if num_warmup_steps:
        snake_case_ = WarmUp(
            initial_learning_rate=__UpperCAmelCase, decay_schedule_fn=__UpperCAmelCase, warmup_steps=__UpperCAmelCase, )
    if weight_decay_rate > 0.0:
        # LayerNorm parameters and biases are conventionally excluded from decay.
        snake_case_ = AdamWeightDecay(
            learning_rate=__UpperCAmelCase, weight_decay_rate=__UpperCAmelCase, beta_a=__UpperCAmelCase, beta_a=__UpperCAmelCase, epsilon=__UpperCAmelCase, clipnorm=__UpperCAmelCase, global_clipnorm=__UpperCAmelCase, exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''], include_in_weight_decay=__UpperCAmelCase, )
    else:
        snake_case_ = tf.keras.optimizers.Adam(
            learning_rate=__UpperCAmelCase, beta_a=__UpperCAmelCase, beta_a=__UpperCAmelCase, epsilon=__UpperCAmelCase, clipnorm=__UpperCAmelCase, global_clipnorm=__UpperCAmelCase, )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class a ( _lowerCamelCase ):
    # Adam with decoupled weight decay (AdamW-style), where include/exclude
    # regex lists control which variables are decayed
    # (upstream: ``AdamWeightDecay(Adam)``).
    # NOTE(review): identifiers are mangled — the base-class placeholder
    # ``_lowerCamelCase`` should be ``Adam``, all parameters are
    # ``lowercase_`` (duplicate names are a SyntaxError) and locals
    # ``snake_case_`` while bodies read the original names.
    def __init__( self : Dict , lowercase_ : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , lowercase_ : float = 0.9 , lowercase_ : float = 0.999 , lowercase_ : float = 1e-7 , lowercase_ : bool = False , lowercase_ : float = 0.0 , lowercase_ : Optional[List[str]] = None , lowercase_ : Optional[List[str]] = None , lowercase_ : str = "AdamWeightDecay" , **lowercase_ : Optional[int] , ):
        super().__init__(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , **lowercase_ )
        snake_case_ = weight_decay_rate
        snake_case_ = include_in_weight_decay
        snake_case_ = exclude_from_weight_decay

    @classmethod
    def A_ ( cls : Dict , lowercase_ : Union[str, Any] ):
        # Restore from a Keras config; WarmUp must be registered as a
        # custom object so serialized schedules round-trip.
        snake_case_ = {'''WarmUp''': WarmUp}
        return super(lowercase_ , cls ).from_config(lowercase_ , custom_objects=lowercase_ )

    def A_ ( self : str , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[int] ):
        # Cache the decay rate as a constant tensor alongside Adam's locals.
        super(lowercase_ , self )._prepare_local(lowercase_ , lowercase_ , lowercase_ )
        snake_case_ = tf.constant(
            self.weight_decay_rate , name='''adam_weight_decay_rate''' )

    def A_ ( self : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Any ):
        # Apply decoupled weight decay only to eligible variables.
        snake_case_ = self._do_use_weight_decay(var.name )
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''] , use_locking=self._use_locking , )
        return tf.no_op()

    def A_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : str=None , **lowercase_ : List[str] ):
        snake_case_ ,snake_case_ = list(zip(*lowercase_ ) )
        return super(lowercase_ , self ).apply_gradients(zip(lowercase_ , lowercase_ ) , name=lowercase_ , **lowercase_ )

    def A_ ( self : List[Any] , lowercase_ : str , lowercase_ : str , lowercase_ : Any ):
        # Fetch (and cache) the LR coefficients for this device/dtype pair.
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        snake_case_ = apply_state or {}
        snake_case_ = apply_state.get((var_device, var_dtype) )
        if coefficients is None:
            snake_case_ = self._fallback_apply_state(lowercase_ , lowercase_ )
            snake_case_ = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def A_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Optional[int]=None ):
        # Dense update: run the weight-decay op first, then the Adam step.
        snake_case_ ,snake_case_ = self._get_lr(var.device , var.dtype.base_dtype , lowercase_ )
        snake_case_ = self._decay_weights_op(lowercase_ , lowercase_ , lowercase_ )
        with tf.control_dependencies([decay] ):
            return super(lowercase_ , self )._resource_apply_dense(lowercase_ , lowercase_ , **lowercase_ )

    def A_ ( self : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : str , lowercase_ : List[Any]=None ):
        # Sparse update: same ordering as the dense path.
        snake_case_ ,snake_case_ = self._get_lr(var.device , var.dtype.base_dtype , lowercase_ )
        snake_case_ = self._decay_weights_op(lowercase_ , lowercase_ , lowercase_ )
        with tf.control_dependencies([decay] ):
            return super(lowercase_ , self )._resource_apply_sparse(lowercase_ , lowercase_ , lowercase_ , **lowercase_ )

    def A_ ( self : Union[str, Any] ):
        snake_case_ = super().get_config()
        config.update({'''weight_decay_rate''': self.weight_decay_rate} )
        return config

    def A_ ( self : Optional[int] , lowercase_ : int ):
        # Whether weight decay applies to a variable with this name:
        # the include-list wins, then the exclude-list, default True.
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(lowercase_ , lowercase_ ) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(lowercase_ , lowercase_ ) is not None:
                    return False
        return True
class a ( _lowerCamelCase ):
    # Accumulates per-replica gradients across steps until ``reset``
    # (upstream: ``GradientAccumulator``).
    # NOTE(review): identifiers mangled as elsewhere in this file; the base
    # placeholder ``_lowerCamelCase`` looks wrong here — upstream this class
    # has no base — and the two ``@property`` accessors and the reset method
    # all share the name ``A_`` so later definitions shadow earlier ones.
    def __init__( self : List[Any] ):
        snake_case_ = []
        snake_case_ = None

    @property
    def A_ ( self : Union[str, Any] ):
        # Lazily created step counter (ON_READ so replicas aggregate safely).
        if self._accum_steps is None:
            snake_case_ = tf.Variable(
                tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
        return self._accum_steps.value()

    @property
    def A_ ( self : Dict ):
        # Snapshot of the accumulated gradients; requires a prior __call__.
        if not self._gradients:
            raise ValueError('''The accumulator should be called first to initialize the gradients''' )
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__( self : Any , lowercase_ : int ):
        # First call allocates zero buffers matching the incoming gradients;
        # subsequent calls add into them and bump the step counter.
        if not self._gradients:
            snake_case_ = self.step # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(lowercase_ ) , trainable=lowercase_ , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(lowercase_ ) != len(self._gradients ):
            raise ValueError(F"Expected {len(self._gradients )} gradients, but got {len(lowercase_ )}" )
        for accum_gradient, gradient in zip(self._gradients , lowercase_ ):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(lowercase_ )
        self._accum_steps.assign_add(1 )

    def A_ ( self : Optional[int] ):
        # Zero the buffers and the step counter (keeps the allocations alive).
        if not self._gradients:
            return
        self._accum_steps.assign(0 )
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(lowercase_ ) )
| 56 | 1 |
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
    # Demo of classic fuzzy-set operations on two triangular membership
    # functions, plotted in a 4x3 grid of subplots.
    # NOTE(review): every result is bound to the single mangled name
    # ``__lowerCAmelCase`` while later lines read ``X``/``young``/
    # ``middle_aged``/... — in particular both membership lists end up as
    # ``abca`` so the second binding overwrites the first; upstream these are
    # distinct ``abc1`` and ``abc2``.
    # Create universe of discourse in Python using linspace ()
    __lowerCAmelCase = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    __lowerCAmelCase = [0, 2_5, 5_0]
    __lowerCAmelCase = [2_5, 5_0, 7_5]
    __lowerCAmelCase = fuzz.membership.trimf(X, abca)
    __lowerCAmelCase = fuzz.membership.trimf(X, abca)
    # Compute the different operations using inbuilt functions.
    __lowerCAmelCase = np.ones(7_5)
    __lowerCAmelCase = np.zeros((7_5,))
    # 1. Union = max(µA(x), µB(x))
    __lowerCAmelCase = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    __lowerCAmelCase = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    __lowerCAmelCase = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    __lowerCAmelCase = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    __lowerCAmelCase = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    __lowerCAmelCase = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    __lowerCAmelCase = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    __lowerCAmelCase = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()
    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("""Young""")
    plt.grid(True)
    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("""Middle aged""")
    plt.grid(True)
    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("""union""")
    plt.grid(True)
    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("""intersection""")
    plt.grid(True)
    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("""complement_a""")
    plt.grid(True)
    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("""difference a/b""")
    plt.grid(True)
    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("""alg_sum""")
    plt.grid(True)
    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("""alg_product""")
    plt.grid(True)
    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("""bdd_sum""")
    plt.grid(True)
    plt.subplot(4, 3, 1_0)
    plt.plot(X, bdd_difference)
    plt.title("""bdd_difference""")
    plt.grid(True)
    plt.subplots_adjust(hspace=0.5)
    plt.show()
| 358 |
"""Memoized factorial helper."""
from functools import lru_cache


@lru_cache
def UpperCAmelCase_(__a: int) -> int:
    """Return ``__a!`` (factorial), memoized via ``lru_cache``.

    Raises:
        ValueError: if ``__a`` is negative.

    Bug fix: the original body referenced ``num`` and recursed through
    ``factorial`` — neither name exists in this scope (the parameter is
    ``__a`` and the function is ``UpperCAmelCase_``), so every call raised
    NameError.
    """
    if __a < 0:
        raise ValueError('Number should not be negative.')
    # Recursive definition; lru_cache memoizes the intermediate results.
    return 1 if __a in (0, 1) else __a * UpperCAmelCase_(__a - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 5 | 0 |
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowerCamelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for ``Wav2Vec2PhonemeCTCTokenizer``.

    Bug fix: the original class declared duplicate ``_A`` parameter names
    (a SyntaxError), inherited from the undefined name ``__a``, and assigned
    every local to ``UpperCAmelCase__`` while reading back ``_A``; all names
    are restored to consistent, meaningful identifiers.
    """

    tokenizer_class = WavaVecaPhonemeCTCTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Phoneme vocabulary of the facebook/wav2vec2-lv-60-espeak-cv-ft checkpoint.
        vocab = (
            '''<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '''
            '''ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '''
            '''ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '''
            '''oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '''
            '''pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '''
            '''yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '''
            '''əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '''
            '''ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '''
            '''ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '''
            '''uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '''
            '''ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '''
            '''ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '''
            '''ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'''
        ).split(''' ''')
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {'''pad_token''': '''<pad>''', '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>'''}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(vocab_tokens) + '''\n''')

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        """Build a (text, ids) pair that round-trips through the tokenizer."""
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ''' '''
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ''' ''' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')

        # check adding a single token
        tokenizer.add_tokens('''xxx''')
        token_ids = tokenizer('''m xxx ɪ''', do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(['''aaa''', '''bbb''', '''ccc'''])
        token_ids = tokenizer('''m aaa ɪ ccc''', do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer('''maɪ c''', do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')

        input_text = '''Hello how are you'''
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='''en-us''')
        self.assertEqual(phonemes, '''h ə l oʊ h aʊ ɑːɹ j uː''')

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')

        input_text = '''Hello how are you'''
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='''en-us''')
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
        input_text = '''Hello how are you'''
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='''en-us''')

        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''])

    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            '''facebook/wav2vec2-lv-60-espeak-cv-ft''', word_delimiter_token='''|''')
        tokenizer.add_tokens('''|''')

        input_text = '''Hello how are you'''
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='''en-us''')
        self.assertEqual(phonemes, '''h ə l oʊ | h aʊ | ɑːɹ | j uː |''')

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            '''facebook/wav2vec2-lv-60-espeak-cv-ft''', word_delimiter_token='''|''')
        tokenizer.add_tokens('''|''')

        input_text = '''Hello how are you'''
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='''en-us''')
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            '''facebook/wav2vec2-lv-60-espeak-cv-ft''', word_delimiter_token='''|''')
        tokenizer.add_tokens('''|''')

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ['''k s ɾ ɾ l ɭʲ''', '''j ð s j ð s oːɹ'''])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ['''k s ɾ | ɾ l | ɭʲ''', '''| j ð | s j ð s oːɹ'''])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            '''facebook/wav2vec2-lv-60-espeak-cv-ft''', word_delimiter_token='''|''')
        tokenizer.add_tokens('''|''')

        input_text = '''Hello how are you'''
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='''en-us''')
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            '''facebook/wav2vec2-lv-60-espeak-cv-ft''', word_delimiter_token='''|''')
        tokenizer.add_tokens('''|''')

        input_text = '''Hello how are you'''
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang='''en-us''')
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(''' '''.join([p.strip() for p in phonemes.split(''' |''')]).strip(), phonemes_enc_dec)

    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            '''facebook/wav2vec2-lv-60-espeak-cv-ft''', word_delimiter_token=None)
        input_text = '''Hello how are you'''

        input_ids_en = tokenizer(input_text, phonemizer_lang='''en-us''').input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang='''fr-fr''').input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, '''h ə l oʊ h aʊ ɑːɹ j uː''')
        self.assertEqual(text_fr, '''ɛ l o h aʊ a ʁ j u''')

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
        input_text_up = '''Hello how Are you'''
        input_text_low = '''hello how are you'''
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('''facebook/wav2vec2-lv-60-espeak-cv-ft''')
        tokenizer.add_tokens(['''!''', '''?'''])
        tokenizer.add_special_tokens({'''cls_token''': '''$$$'''})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ['''k s ɾ ɾ l ɭʲ!?!? $$$''', '''j ð s j ð s oːɹ $$$'''])

    @staticmethod
    def get_from_offsets(offsets, key):
        """Collect `offsets[i][key]` for every offset dict."""
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token='''|''')
        tokenizer.add_tokens('''|''')

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue('''text''' in outputs)
        self.assertTrue('''char_offsets''' in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaPhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''char_offsets'''], '''char''')), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs['''char_offsets'''], '''char'''), ['''k''', '''s''', '''ɾ''', '''ɾ''', '''|''', '''ɾ''', '''l''', '''|''', '''ɭʲ'''])

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs['''char_offsets'''], '''start_offset'''), [0, 1, 4, 7, 9, 11, 12, 15, 16])
        self.assertListEqual(
            self.get_from_offsets(outputs['''char_offsets'''], '''end_offset'''), [1, 4, 6, 9, 10, 12, 15, 16, 17])

    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token='''|''')

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, WavaVecaPhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], WavaVecaPhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_a = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]})

            self.assertListEqual(outputs_batch['''text'''], outputs_batch_a['''text'''])

            def recursive_check(list_or_dict_a, list_or_dict_b):
                if isinstance(list_or_dict_a, list):
                    [recursive_check(la, lb) for la, lb in zip(list_or_dict_a, list_or_dict_b)]
                self.assertEqual(list_or_dict_a, list_or_dict_b)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch['''char_offsets'''], outputs_batch_a['''char_offsets'''])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)

    @unittest.skip('''Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes''')
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip('''Wav2Vec2PhonemeTokenizer always puts spaces between phonemes''')
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip('''encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency''')
    def test_internal_consistency(self):
        pass

    @unittest.skip('''Wav2Vec2PhonemeModel has no max model length => no testing''')
    def test_pretrained_model_lists(self):
        pass

    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''', add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''', add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''')
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip('''The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.''')
    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # convert_tokens_to_string returns a dict (not a str) for this tokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                tokens = ['''ð''', '''ɪ''', '''s''', '''ɪ''', '''z''', '''ɐ''', '''t''', '''ɛ''', '''k''', '''s''', '''t''']
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output['''text'''], str)
| 181 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File names the tokenizer reads/writes. NOTE: these constants were all
# assigned to a single mangled name (`UpperCamelCase__`) while the class below
# references VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, etc.; the proper
# names are restored here.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
}

# Maximum sequence length each checkpoint supports.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode() -> dict:
    """Return a mapping from every byte value (0-255) to a unicode character.

    Printable/visually distinct bytes map to themselves; the remaining bytes
    map to code points starting at 256, so no byte ever maps to a control or
    whitespace character (which byte-level BPE would mishandle).

    Bug fix: this function was obfuscated to ``a__`` (shadowed by the next
    definition and never callable) and its body referenced undefined names
    (``bs``, ``cs``, ``n``); the canonical name and locals are restored so the
    call in the tokenizer's ``__init__`` resolves.
    """
    bs = (
        list(range(ord('''!'''), ord('''~''') + 1)) + list(range(ord('''¡'''), ord('''¬''') + 1)) + list(range(ord('''®'''), ord('''ÿ''') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word``.

    ``word`` is a tuple of symbols (variable-length strings).

    Bug fix: the function was obfuscated to ``a__`` while ``bpe`` calls it as
    ``get_pairs``, and the body referenced the undefined names ``pairs``/
    ``prev_char``; consistent names are restored.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowerCamelCase_(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for BART (GPT-2 style).

    Bug fix: the original declared duplicate ``_A`` parameter names in
    ``__init__`` (a SyntaxError), named every method ``lowercase_`` (each
    shadowing the previous), inherited from the undefined ``__a``, and used
    ``kv`` inside a lambda whose parameter was ``_A``. Canonical names are
    restored; behavior matches the visible statements.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Full vocabulary (base + added tokens) as token -> id."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to one pre-token; results are cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Lowest-ranked (most frequent) adjacent pair merges first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ''' '''.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(''' '''))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens back into a decoded string."""
        text = ''''''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])

        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')

        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add <s> ... </s> (and pair separators) around the input ids."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """BART does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is BPE-merged like a mid-sentence word."""
        add_prefix_space = kwargs.pop('''add_prefix_space''', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
| 181 | 1 |
"""Pre-tokenize a dataset with a given tokenizer and push the result to the Hub."""
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    """Tokenize one row's ``content`` and record its chars-per-token ratio.

    Bug fix: the original function was named ``snake_case__`` (immediately
    shadowed by the parser assignment below) and its body referenced the
    undefined name ``example`` while the mangled parameter went unused.
    """
    output = {}
    output["input_ids"] = tokenizer(example['''content'''], truncation=False)['''input_ids''']
    output["ratio_char_token"] = len(example['''content''']) / len(output['''input_ids'''])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(F'Dataset loaded in {time.time()-t_start:.2f}s')

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        """repo_name""",
        """path""",
        """copies""",
        """size""",
        """content""",
        """license""",
        """hash""",
        """line_mean""",
        """line_max""",
        """alpha_frac""",
        """autogenerated""",
    ],
)
print(F'Dataset tokenized in {time.time()-t_start:.2f}s')

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'Data pushed to the hub in {time.time()-t_start:.2f}s')
| 366 |
"""Fetch quotes from the zenquotes.io public API."""
import pprint

import requests

API_ENDPOINT_URL = """https://zenquotes.io/api"""


def quote_of_the_day() -> list:
    """Return today's quote as parsed JSON (a list of quote dicts)."""
    return requests.get(API_ENDPOINT_URL + '''/today''').json()


def random_quotes() -> list:
    """Return a random quote as parsed JSON (a list of quote dicts).

    Bug fix: both functions were obfuscated to ``snake_case__`` (the second
    shadowing the first) and the ``__main__`` block called the undefined name
    ``random_quotes``; the distinct names are restored.
    """
    return requests.get(API_ENDPOINT_URL + '''/random''').json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 4 | 0 |
from __future__ import annotations

# Explicit public API: the helper keeps its historical underscore-prefixed
# name, so it must be listed here for `from module import *` to expose it.
__all__ = ["_UpperCAmelCase"]


def _UpperCAmelCase(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """Apply Ohm's law: given exactly one zero among (voltage, current,
    resistance), compute and return the missing quantity.

    Returns:
        A one-entry dict naming the computed quantity, e.g. ``{"voltage": 8.0}``.

    Raises:
        ValueError: if not exactly one argument is zero, or resistance < 0.

    Bug fix: the original signature declared the parameter name ``snake_case``
    three times (a SyntaxError) while the body read ``voltage``/``current``/
    ``resistance``; the real parameter names are restored.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("""One and only one argument must be 0""")
    if resistance < 0:
        raise ValueError("""Resistance cannot be negative""")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    if current == 0:
        return {"current": voltage / resistance}
    if resistance == 0:
        return {"resistance": voltage / current}
    # Unreachable: the count check above guarantees one of the branches fired.
    raise ValueError("""Exactly one argument must be 0""")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 82 |
"""Entry point for the ``datasets-cli`` command-line tool."""
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    """Pair up leftover ``--key value`` tokens into a ``{key: value}`` dict.

    Bug fix: both functions in this module were obfuscated to ``snake_case_``
    (the second shadowing the first), the body referenced the undefined name
    ``unknown_args``, and both call sites (``parse_unknown_args`` / ``main``)
    were unresolved; consistent names are restored.
    """
    return {key.lstrip('''-'''): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    """Build the CLI parser, dispatch to the selected sub-command, and run it."""
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''', usage='''datasets-cli <command> [<args>]''', allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''')
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, '''func'''):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
| 34 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=3_0 , _lowerCamelCase=4_0_0 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=True , _lowerCamelCase=1 / 2_5_5 , _lowerCamelCase=True , ):
UpperCamelCase_: List[Any] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
UpperCamelCase_: List[Any] = parent
UpperCamelCase_: Optional[int] = batch_size
UpperCamelCase_: List[str] = num_channels
UpperCamelCase_: Dict = min_resolution
UpperCamelCase_: List[Any] = max_resolution
UpperCamelCase_: Dict = do_resize
UpperCamelCase_: str = size
UpperCamelCase_: Any = do_normalize
UpperCamelCase_: Optional[int] = image_mean
UpperCamelCase_: Tuple = image_std
UpperCamelCase_: List[Any] = do_rescale
UpperCamelCase_: List[str] = rescale_factor
UpperCamelCase_: Union[str, Any] = do_pad
def _a ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _a ( self , _lowerCamelCase , _lowerCamelCase=False ):
if not batched:
UpperCamelCase_: Optional[Any] = image_inputs[0]
if isinstance(UpperCamelCase__ , Image.Image ):
UpperCamelCase_ ,UpperCamelCase_: Tuple = image.size
else:
UpperCamelCase_ ,UpperCamelCase_: Optional[int] = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase_: Optional[Any] = int(self.size['shortest_edge'] * h / w )
UpperCamelCase_: Union[str, Any] = self.size['shortest_edge']
elif w > h:
UpperCamelCase_: Optional[Any] = self.size['shortest_edge']
UpperCamelCase_: Optional[Any] = int(self.size['shortest_edge'] * w / h )
else:
UpperCamelCase_: str = self.size['shortest_edge']
UpperCamelCase_: Tuple = self.size['shortest_edge']
else:
UpperCamelCase_: Any = []
for image in image_inputs:
UpperCamelCase_ ,UpperCamelCase_: Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase_: Optional[Any] = max(UpperCamelCase__ , key=lambda _lowerCamelCase : item[0] )[0]
UpperCamelCase_: List[Any] = max(UpperCamelCase__ , key=lambda _lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
# NOTE(review): this class reuses the name `_lowerCAmelCase` of the tester
# class above (shadowing it), inherits from an undefined `__a` (originally an
# ImageProcessingSavingTestMixin, presumably), and every method is named `_a`
# so only the last definition survives. Throughout, `UpperCamelCase__` is an
# undefined placeholder left by an automated rename — the original arguments
# (booleans, expected values, the tester instance) must be restored before
# these tests can run. Code left byte-identical; comments only.
class _lowerCAmelCase( __a , unittest.TestCase ):
    """Integration tests for ConditionalDetrImageProcessor: property round-trip,
    PIL/numpy/torch batching, and slow COCO detection/panoptic annotation checks."""
    a : Dict =ConditionalDetrImageProcessor if is_vision_available() else None
    def _a ( self ):
        # setUp: build the shape/config helper defined above this class.
        UpperCamelCase_: Optional[Any] = ConditionalDetrImageProcessingTester(self )
    @property
    def _a ( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def _a ( self ):
        # Processor exposes all expected config attributes.
        UpperCamelCase_: Tuple = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCamelCase__ , 'image_mean' ) )
        self.assertTrue(hasattr(UpperCamelCase__ , 'image_std' ) )
        self.assertTrue(hasattr(UpperCamelCase__ , 'do_normalize' ) )
        self.assertTrue(hasattr(UpperCamelCase__ , 'do_resize' ) )
        self.assertTrue(hasattr(UpperCamelCase__ , 'size' ) )
    def _a ( self ):
        # from_dict honours defaults and legacy size/max_size overrides.
        UpperCamelCase_: Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
        self.assertEqual(image_processor.do_pad , UpperCamelCase__ )
        UpperCamelCase_: List[Any] = self.image_processing_class.from_dict(
            self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=UpperCamelCase__ )
        self.assertEqual(image_processor.size , {'shortest_edge': 4_2, 'longest_edge': 8_4} )
        self.assertEqual(image_processor.do_pad , UpperCamelCase__ )
    def _a ( self ):
        pass
    def _a ( self ):
        # PIL inputs: single image and batch produce the expected padded shapes.
        UpperCamelCase_: Any = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase_: Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , Image.Image )
        # Test not batched input
        UpperCamelCase_: str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        UpperCamelCase_ ,UpperCamelCase_: List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCamelCase_ ,UpperCamelCase_: Dict = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
        UpperCamelCase_: Optional[int] = image_processing(UpperCamelCase__ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def _a ( self ):
        # numpy ndarray inputs: same shape expectations as the PIL path.
        UpperCamelCase_: List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase_: Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , np.ndarray )
        # Test not batched input
        UpperCamelCase_: Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        UpperCamelCase_ ,UpperCamelCase_: str = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCamelCase_: List[str] = image_processing(UpperCamelCase__ , return_tensors='pt' ).pixel_values
        UpperCamelCase_ ,UpperCamelCase_: Any = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def _a ( self ):
        # torch tensor inputs: same shape expectations as the PIL path.
        UpperCamelCase_: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase_: Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
        # Test not batched input
        UpperCamelCase_: Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        UpperCamelCase_ ,UpperCamelCase_: List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCamelCase_: List[Any] = image_processing(UpperCamelCase__ , return_tensors='pt' ).pixel_values
        UpperCamelCase_ ,UpperCamelCase_: int = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def _a ( self ):
        # COCO detection annotations: pixel values, areas, boxes and labels
        # are compared against hard-coded reference tensors.
        UpperCamelCase_: List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            UpperCamelCase_: Optional[Any] = json.loads(f.read() )
        UpperCamelCase_: int = {'image_id': 3_9_7_6_9, 'annotations': target}
        # encode them
        UpperCamelCase_: List[str] = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
        UpperCamelCase_: Optional[Any] = image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , return_tensors='pt' )
        # verify pixel values
        UpperCamelCase_: str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['pixel_values'].shape , UpperCamelCase__ )
        UpperCamelCase_: Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCamelCase__ , atol=1e-4 ) )
        # verify area
        UpperCamelCase_: Any = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCamelCase__ ) )
        # verify boxes
        UpperCamelCase_: Dict = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCamelCase__ )
        UpperCamelCase_: Optional[int] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCamelCase__ , atol=1e-3 ) )
        # verify image_id
        UpperCamelCase_: int = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCamelCase__ ) )
        # verify is_crowd
        UpperCamelCase_: str = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCamelCase__ ) )
        # verify class_labels
        UpperCamelCase_: Union[str, Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCamelCase__ ) )
        # verify orig_size
        UpperCamelCase_: Union[str, Any] = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCamelCase__ ) )
        # verify size
        UpperCamelCase_: str = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCamelCase__ ) )
    @slow
    def _a ( self ):
        # COCO panoptic annotations: additionally verifies the segmentation
        # masks' pixel-count checksum.
        UpperCamelCase_: List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            UpperCamelCase_: Any = json.loads(f.read() )
        UpperCamelCase_: Dict = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
        UpperCamelCase_: Any = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        UpperCamelCase_: Optional[int] = ConditionalDetrImageProcessor(format='coco_panoptic' )
        UpperCamelCase_: str = image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , masks_path=UpperCamelCase__ , return_tensors='pt' )
        # verify pixel values
        UpperCamelCase_: Union[str, Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['pixel_values'].shape , UpperCamelCase__ )
        UpperCamelCase_: Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , UpperCamelCase__ , atol=1e-4 ) )
        # verify area
        UpperCamelCase_: Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , UpperCamelCase__ ) )
        # verify boxes
        UpperCamelCase_: int = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , UpperCamelCase__ )
        UpperCamelCase_: Dict = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , UpperCamelCase__ , atol=1e-3 ) )
        # verify image_id
        UpperCamelCase_: List[str] = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , UpperCamelCase__ ) )
        # verify is_crowd
        UpperCamelCase_: str = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , UpperCamelCase__ ) )
        # verify class_labels
        UpperCamelCase_: Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , UpperCamelCase__ ) )
        # verify masks
        UpperCamelCase_: Any = 8_2_2_8_7_3
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , UpperCamelCase__ )
        # verify orig_size
        UpperCamelCase_: int = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , UpperCamelCase__ ) )
        # verify size
        UpperCamelCase_: Optional[int] = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , UpperCamelCase__ ) )
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def snake_case (bpayload, sampling_rate) -> np.array:
    """Decode an in-memory audio payload to a mono float32 waveform via ffmpeg.

    Pipes ``bpayload`` (encoded audio bytes) into an ``ffmpeg`` subprocess that
    resamples to ``sampling_rate``, downmixes to one channel and emits raw
    little-endian float32 PCM on stdout.

    Defects fixed: the signature declared both parameters as ``UpperCAmelCase__``
    (a SyntaxError), the body read an undefined ``output_stream`` name, and
    ``np.floataa`` is not a NumPy dtype (mangled ``np.float32`` — the matching
    dtype for the ``f32le`` output format requested below).

    Raises:
        ValueError: if the ``ffmpeg`` binary is missing, or the payload decodes
            to zero samples.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    # Reinterpret the raw f32le byte stream as float32 samples (zero-copy).
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio
def snake_case (sampling_rate, chunk_length_s, format_for_conversion = "f32le"):
    """Yield raw audio byte chunks captured from the default microphone via ffmpeg.

    Builds a platform-specific capture command (alsa/avfoundation/dshow) and
    streams fixed-size chunks of ``chunk_length_s`` seconds at ``sampling_rate``.

    Defects fixed: all three parameters shared the name ``UpperCAmelCase__``
    (a SyntaxError) while the body already used the real names.

    NOTE(review): on an unrecognised platform ``format_`` / ``input_`` stay
    unbound and the command construction raises NameError — behavior preserved
    from the original; confirm whether a ValueError is wanted instead. The call
    to ``_ffmpeg_stream`` targets a sibling helper whose definition in this
    file was renamed to ``snake_case`` by an automated pass — verify.
    """
    ar = f"{sampling_rate}"
    ac = "1"  # mono
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = 'alsa'
        input_ = 'default'
    elif system == "Darwin":
        format_ = 'avfoundation'
        input_ = ':0'
    elif system == "Windows":
        format_ = 'dshow'
        input_ = 'default'
    ffmpeg_command = [
        'ffmpeg',
        '-f',
        format_,
        '-i',
        input_,
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-fflags',
        'nobuffer',
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    # Bytes per chunk = samples per chunk * bytes per sample.
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def snake_case (sampling_rate, chunk_length_s, stream_chunk_s = None, stride_length_s = None, format_for_conversion = "f32le"):
    """Yield overlapping microphone windows as numpy arrays for streaming ASR.

    Wraps the raw microphone byte stream in strided windows, converts each to a
    numpy array and attaches the stride (in samples) plus the sampling rate.
    Windows that fall more than ten chunk-durations behind real time are skipped.

    Defects fixed: duplicate ``UpperCAmelCase__`` parameter names (SyntaxError);
    ``np.intaa``/``np.floataa`` are not NumPy dtypes (mangled int16/float32,
    matching the s16le/f32le byte formats); the converted array, reduced stride
    and sampling rate were assigned to throwaway locals instead of being written
    back into ``item``; ``stream=UpperCAmelCase__`` restored to ``stream=True``,
    which is what makes the ``partial`` early-yield path available to callers.

    NOTE(review): ``ffmpeg_microphone`` and ``chunk_bytes_iter`` refer to
    sibling helpers whose definitions in this file were renamed to
    ``snake_case`` by an automated pass — verify the call targets.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        # Default overlap: one sixth of the chunk on each side.
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 1_0 * delta:
            # We're late !! SKIP
            continue
        yield item
def snake_case (iterator, chunk_len, stride, stream = False):
    """Re-chunk a byte iterator into fixed-size windows with left/right overlap.

    Yields dicts ``{"raw": <bytes of length chunk_len>, "stride": (left, right)}``.
    The first window has left stride 0; consecutive windows overlap by
    ``stride_left + stride_right`` bytes. With ``stream=True`` a not-yet-full
    accumulator is also yielded early with ``"partial": True``, and complete
    windows carry ``"partial": False``. A final shorter window is flushed if
    more than ``stride_left`` bytes remain.

    Defects fixed: the signature declared all four parameters as
    ``UpperCAmelCase__`` (a SyntaxError) and the body read ``acc``/``raw``
    while the accumulator had been bound to an obfuscated name.

    Raises:
        ValueError: if the combined stride is not strictly smaller than
            ``chunk_len`` (windows would never advance).
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}")
    # The very first window has no left context yet.
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                # Keep the overlap region for the next window.
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def snake_case (ffmpeg_command, buflen):
    """Spawn a subprocess and yield its stdout in ``buflen``-sized byte chunks.

    Defects fixed: both parameters were declared as ``UpperCAmelCase__``
    (a SyntaxError), and the local 16MiB pipe-buffer size and the per-read
    length were conflated with the (broken) parameter name.

    Raises:
        ValueError: if the command's binary (ffmpeg) is not installed.
    """
    bufsize = 2**2_4  # 16Mo pipe buffer between the child process and us
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename') from error
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
# Fixtures for the hub-cache tests below.
# NOTE(review): all three constants share the obfuscated name `__UpperCAmelCase`,
# so only the last binding survives — originally these were distinct names
# (repo id, cache dir, commit hash). TODO restore before running the tests.
__UpperCAmelCase = '''hf-internal-testing/tiny-random-bert'''
# Expected on-disk cache directory for that repo under the shared cache root.
__UpperCAmelCase = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
# Pinned commit hash used to exercise revision-specific downloads.
__UpperCAmelCase = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
# NOTE(review): every method below is named `__lowerCAmelCase` (each definition
# shadows the previous one under Python's class-body semantics), and `_A` is an
# undefined placeholder left by an automated rename — the original arguments
# (repo ids, filenames, expected exception classes, booleans) were lost. Code
# left byte-identical; comments only. TODO restore from the upstream suite.
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Tests for the hub file-caching helpers: cached_file, has_file and
    get_file_from_repo, including cache layout, error paths and offline mocks."""
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        # Download a file and check the hub cache layout (blobs/refs/snapshots),
        # that a second call is a cache hit, and that explicit revisions resolve.
        lowerCAmelCase_ :Optional[int] = cached_file(_A , _A )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(_A ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(_A , _A ) ) )
        with open(os.path.join(_A , """refs""" , """main""" ) ) as f:
            lowerCAmelCase_ :Tuple = f.read()
        self.assertEqual(_A , os.path.join(_A , """snapshots""" , _A , _A ) )
        self.assertTrue(os.path.isfile(_A ) )
        # File is cached at the same place the second time.
        lowerCAmelCase_ :Dict = cached_file(_A , _A )
        self.assertEqual(_A , _A )
        # Using a specific revision to test the full commit hash.
        lowerCAmelCase_ :List[str] = cached_file(_A , _A , revision="""9b8c223""" )
        self.assertEqual(_A , os.path.join(_A , """snapshots""" , _A , _A ) )
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        # Invalid repo id, invalid revision and missing filename all raise.
        with self.assertRaisesRegex(_A , """is not a valid model identifier""" ):
            lowerCAmelCase_ :Dict = cached_file("""tiny-random-bert""" , _A )
        with self.assertRaisesRegex(_A , """is not a valid git identifier""" ):
            lowerCAmelCase_ :List[str] = cached_file(_A , _A , revision="""aaaa""" )
        with self.assertRaisesRegex(_A , """does not appear to have a file named""" ):
            lowerCAmelCase_ :str = cached_file(_A , """conf""" )
    def __lowerCAmelCase ( self ) -> Optional[int]:
        # Missing entries: `.no_exist` markers, soft-failure flags, and a mocked
        # HTTP 500 exercising _raise_exceptions_for_connection_errors=False.
        with self.assertRaisesRegex(_A , """does not appear to have a file named""" ):
            lowerCAmelCase_ :str = cached_file(_A , """conf""" )
        with open(os.path.join(_A , """refs""" , """main""" ) ) as f:
            lowerCAmelCase_ :Optional[int] = f.read()
        self.assertTrue(os.path.isfile(os.path.join(_A , """.no_exist""" , _A , """conf""" ) ) )
        lowerCAmelCase_ :Dict = cached_file(_A , """conf""" , _raise_exceptions_for_missing_entries=_A )
        self.assertIsNone(_A )
        lowerCAmelCase_ :Optional[int] = cached_file(_A , """conf""" , local_files_only=_A , _raise_exceptions_for_missing_entries=_A )
        self.assertIsNone(_A )
        lowerCAmelCase_ :Optional[int] = mock.Mock()
        lowerCAmelCase_ :Any = 500
        lowerCAmelCase_ :str = {}
        lowerCAmelCase_ :str = HTTPError
        lowerCAmelCase_ :Any = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("""requests.Session.request""" , return_value=_A ) as mock_head:
            lowerCAmelCase_ :Dict = cached_file(_A , """conf""" , _raise_exceptions_for_connection_errors=_A )
            self.assertIsNone(_A )
            # This check we did call the fake head request
            mock_head.assert_called()
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        # has_file distinguishes which weight formats exist on a PT-only repo.
        self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _A ) )
        self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _A ) )
        self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , _A ) )
    def __lowerCAmelCase ( self ) -> Any:
        # get_file_from_repo: None for missing files, raises for bad repo or
        # revision, and returns a readable config for valid requests.
        self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(_A , """is not a valid model identifier""" ):
            get_file_from_repo("""bert-base-case""" , _A )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(_A , """is not a valid git identifier""" ):
            get_file_from_repo("""bert-base-cased""" , _A , revision="""ahaha""" )
        lowerCAmelCase_ :List[str] = get_file_from_repo("""bert-base-cased""" , _A )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        lowerCAmelCase_ :Any = json.loads(open(_A , """r""" ).read() )
        self.assertEqual(config["""hidden_size"""] , 768 )
    def __lowerCAmelCase ( self ) -> str:
        # Local directories are also valid "repos" for get_file_from_repo.
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCAmelCase_ :Tuple = Path(_A ) / """a.txt"""
            filename.touch()
            self.assertEqual(get_file_from_repo(_A , """a.txt""" ) , str(_A ) )
            self.assertIsNone(get_file_from_repo(_A , """b.txt""" ) )
| 84 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and the map of pretrained checkpoints to their config URLs.
# NOTE(review): both bindings share the obfuscated name `lowerCAmelCase__`,
# so the logger is shadowed by the dict — originally these were `logger` and
# a *_PRETRAINED_CONFIG_ARCHIVE_MAP constant. TODO confirm.
lowerCAmelCase__ : Any =logging.get_logger(__name__)
lowerCAmelCase__ : str ={
    '''microsoft/unispeech-sat-base-100h-libri-ft''': (
        '''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
    ),
    # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
# NOTE(review): the `__init__` below declares every parameter as `_A` — a
# duplicate-argument SyntaxError left by an automated rename. The defaults
# (32, 768, 12, ..., 504) line up with the documented UniSpeechSatConfig
# signature (vocab_size, hidden_size, num_hidden_layers, ...), and the body's
# attribute names record the intended mapping; restore the parameter names
# from those before this module can be imported. Code left byte-identical.
class UpperCAmelCase_ ( UpperCamelCase_ ):
    """Configuration class for UniSpeechSat models: feature-extractor conv
    stack, transformer encoder, SpecAugment, quantizer, CTC and XVector heads."""
    UpperCamelCase__ : Dict = '''unispeech-sat'''
    def __init__( self , _A=32 , _A=768 , _A=12 , _A=12 , _A=3_072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.1 , _A=0.1 , _A=0.0_2 , _A=1e-5 , _A="group" , _A="gelu" , _A=(512, 512, 512, 512, 512, 512, 512) , _A=(5, 2, 2, 2, 2, 2, 2) , _A=(10, 3, 3, 3, 3, 2, 2) , _A=False , _A=128 , _A=16 , _A=False , _A=True , _A=0.0_5 , _A=10 , _A=2 , _A=0.0 , _A=10 , _A=0 , _A=320 , _A=2 , _A=0.1 , _A=100 , _A=256 , _A=256 , _A=0.1 , _A="mean" , _A=False , _A=False , _A=256 , _A=(512, 512, 512, 512, 1_500) , _A=(5, 3, 3, 1, 1) , _A=(1, 2, 3, 1, 1) , _A=512 , _A=0 , _A=1 , _A=2 , _A=504 , **_A , ):
        '''simple docstring'''
        super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
        # Transformer encoder / feature-extractor hyper-parameters.
        __SCREAMING_SNAKE_CASE = hidden_size
        __SCREAMING_SNAKE_CASE = feat_extract_norm
        __SCREAMING_SNAKE_CASE = feat_extract_activation
        __SCREAMING_SNAKE_CASE = list(_A )
        __SCREAMING_SNAKE_CASE = list(_A )
        __SCREAMING_SNAKE_CASE = list(_A )
        __SCREAMING_SNAKE_CASE = conv_bias
        __SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
        __SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
        __SCREAMING_SNAKE_CASE = len(self.conv_dim )
        __SCREAMING_SNAKE_CASE = num_hidden_layers
        __SCREAMING_SNAKE_CASE = intermediate_size
        __SCREAMING_SNAKE_CASE = hidden_act
        __SCREAMING_SNAKE_CASE = num_attention_heads
        __SCREAMING_SNAKE_CASE = hidden_dropout
        __SCREAMING_SNAKE_CASE = attention_dropout
        __SCREAMING_SNAKE_CASE = activation_dropout
        __SCREAMING_SNAKE_CASE = feat_proj_dropout
        __SCREAMING_SNAKE_CASE = final_dropout
        __SCREAMING_SNAKE_CASE = layerdrop
        __SCREAMING_SNAKE_CASE = layer_norm_eps
        __SCREAMING_SNAKE_CASE = initializer_range
        __SCREAMING_SNAKE_CASE = vocab_size
        __SCREAMING_SNAKE_CASE = num_clusters
        __SCREAMING_SNAKE_CASE = do_stable_layer_norm
        __SCREAMING_SNAKE_CASE = use_weighted_layer_sum
        # The three conv-layer lists must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        __SCREAMING_SNAKE_CASE = apply_spec_augment
        __SCREAMING_SNAKE_CASE = mask_time_prob
        __SCREAMING_SNAKE_CASE = mask_time_length
        __SCREAMING_SNAKE_CASE = mask_time_min_masks
        __SCREAMING_SNAKE_CASE = mask_feature_prob
        __SCREAMING_SNAKE_CASE = mask_feature_length
        __SCREAMING_SNAKE_CASE = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        __SCREAMING_SNAKE_CASE = num_codevectors_per_group
        __SCREAMING_SNAKE_CASE = num_codevector_groups
        __SCREAMING_SNAKE_CASE = contrastive_logits_temperature
        __SCREAMING_SNAKE_CASE = feat_quantizer_dropout
        __SCREAMING_SNAKE_CASE = num_negatives
        __SCREAMING_SNAKE_CASE = codevector_dim
        __SCREAMING_SNAKE_CASE = proj_codevector_dim
        __SCREAMING_SNAKE_CASE = diversity_loss_weight
        # ctc loss
        __SCREAMING_SNAKE_CASE = ctc_loss_reduction
        __SCREAMING_SNAKE_CASE = ctc_zero_infinity
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        __SCREAMING_SNAKE_CASE = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        __SCREAMING_SNAKE_CASE = list(_A )
        __SCREAMING_SNAKE_CASE = list(_A )
        __SCREAMING_SNAKE_CASE = list(_A )
        __SCREAMING_SNAKE_CASE = xvector_output_dim
    @property
    def _A ( self ):
        # Total downsampling factor of the conv feature extractor
        # (product of all conv strides).
        '''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 257 | 0 |
"""Demonstrate classical fuzzy-set operations with scikit-fuzzy and plot them.

Defects fixed: the script body had lost its indentation under the
``__main__`` guard (an IndentationError — the guard had no indented block),
and every intermediate result was rebound to the same obfuscated name
``lowercase`` while the plotting section reads ``young``, ``union``,
``alg_sum``, etc. Variable names restored to match the plot calls, which
survived obfuscation intact.
"""
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Constant membership grades used by the bounded operators below.
    one = np.ones(75)
    zero = np.zeros((75,))

    # Compute the different operations using inbuilt functions.
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1- min(µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x),(1- µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1,(µA(x), µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = min[0,(µA(x), µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title('Young')
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title('Middle aged')
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title('union')
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title('intersection')
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title('complement_a')
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title('difference a/b')
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title('alg_sum')
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title('alg_product')
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title('bdd_sum')
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title('bdd_difference')
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] , _lowerCamelCase : str) -> Any:
'''simple docstring'''
__UpperCamelCase : Dict = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple) -> Any:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = 0
while b > 0:
if b & 1:
__UpperCamelCase : str = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res | 151 | 0 |
def SCREAMING_SNAKE_CASE__ ( input_str, use_pascal = False ) -> str:
    """Convert a snake_case string to camelCase (or PascalCase).

    Defects fixed: both parameters were declared as ``_UpperCAmelCase`` — a
    SyntaxError — and consecutive/leading underscores produced empty segments
    whose ``word[0]`` indexing raised IndexError; empty segments are now
    skipped, so ``"a__b"`` yields ``"aB"``.

    Args:
        input_str: the snake_case string to convert.
        use_pascal: capitalize the first word too (PascalCase) when True.

    Raises:
        ValueError: if ``input_str`` is not a str or ``use_pascal`` not a bool.
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split('_')
    # camelCase keeps the first word lowercase; PascalCase capitalizes it too.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize if word]
    initial_word = '' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 138 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
#
# Fix: the list was bound only to ``__A`` while every statement below read
# the undefined name ``pkgs_to_check_at_runtime`` (NameError); ``__A`` is
# kept as an alias for backward compatibility. The ``Union[...]`` annotation
# was dropped because ``typing`` is never imported here, and module-level
# variable annotations ARE evaluated at runtime.
__A = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
pkgs_to_check_at_runtime = __A
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(F'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py')
def SCREAMING_SNAKE_CASE__(pkg, hint=None):
    """Require that ``pkg``'s pinned version (looked up in ``deps``) is installed.

    Fixes the original signature, which declared ``_UpperCAmelCase`` twice
    (a SyntaxError). The ``-> Dict`` return annotation was dropped because
    ``Dict`` is never imported in this module and function annotations are
    evaluated at definition time.

    Args:
        pkg: key into the module-level ``deps`` version table.
        hint: optional message forwarded to ``require_version`` on failure.
    """
    require_version(deps[pkg], hint)
| 138 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Fix: both the logger and the archive map were bound to ``snake_case__``
# (the second assignment clobbered the first) while the config class below
# reads ``logger``. The map name follows the upstream naming convention.
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/table-transformer-detection""": (
        """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
    ),
}
class UpperCamelCase_ (a__ ):
    """Configuration for the Table Transformer (DETR-style detection) model.

    NOTE(review): reconstructed from an obfuscated original whose
    ``__init__`` repeated the single parameter name ``_lowerCamelCase`` for
    every argument (a SyntaxError) and whose class attributes/properties
    were all bound to one name (each rebinding clobbered the previous).
    Parameter names, defaults and attribute names are recovered from the
    body's own use sites; the base class ``a__`` is presumably
    ``PretrainedConfig`` -- TODO confirm.
    """

    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        """Build the config; see attribute assignments below for semantics."""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self):
        """Alias required by ``attribute_map``."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        """Alias required by ``attribute_map``."""
        return self.d_model
class UpperCamelCase_ (a__ ):
    """ONNX export configuration for Table Transformer (requires torch >= 1.11)."""

    _lowerCAmelCase = version.parse('1.11' )

    @property
    def _a ( self : Tuple ):
        """Ordered input spec: ``pixel_values`` with dynamic batch/channel/spatial
        axes, plus a ``pixel_mask`` with a dynamic batch axis."""
        pixel_values_axes = {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}
        pixel_mask_axes = {0: '''batch'''}
        return OrderedDict([('''pixel_values''', pixel_values_axes), ('''pixel_mask''', pixel_mask_axes)])

    @property
    def _a ( self : Optional[int] ):
        """Return 1e-5 (presumably the export validation tolerance -- per upstream OnnxConfig)."""
        return 1e-5

    @property
    def _a ( self : str ):
        """Return 12 (presumably the default ONNX opset -- per upstream OnnxConfig)."""
        return 12
| 4 |
'''simple docstring'''
import heapq
def snake_case__(lowerCamelCase__: dict) -> set[int]:
    """Greedy APPROX vertex cover: repeatedly pick the highest-degree vertex.

    Fixes the original body, which iterated the undefined name ``graph`` and
    pushed heap entries onto the input mapping instead of a local queue.

    Args:
        lowerCamelCase__: adjacency mapping ``{vertex: list of neighbours}``.
            NOTE: the neighbour lists are mutated in place while the cover
            is built.

    Returns:
        The set of vertices chosen for the cover.

    >>> snake_case__({0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]})
    {0, 1, 2, 4}
    """
    queue: list[list] = []
    # heapq is a min-heap, so store -degree ("rank") to pop max degree first.
    for key, value in lowerCamelCase__.items():
        # O(log(n)) push per vertex
        heapq.heappush(queue, [-1 * len(value), (key, value)])
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # vertices with no remaining neighbours can be skipped
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem, drop that arc and bump the
            # (negative) rank one step towards 0
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # restore the heap invariant after the in-place rank updates
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Bind the demo graph to its own name: the original rebound
    # ``snake_case__`` (clobbering the function defined above) and then
    # called the undefined names ``greedy_min_vertex_cover`` / ``graph``.
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}

    print(f'Minimum vertex cover:\n{snake_case__(graph)}')
| 4 | 1 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowercase(A_) -> dict:
    """Tokenize one dataset example and record its chars-per-token ratio.

    Fixes the original body, which overwrote a single local ``a`` on every
    line and then returned the undefined name ``output``. The ``List[str]``
    return annotation was dropped (``List`` is never imported here and
    function annotations are evaluated at definition time).

    Args:
        A_: a dataset example mapping with a "content" text field.

    Returns:
        dict holding the example's "input_ids" and its character/token ratio.
    """
    output = {}
    # NOTE(review): the obfuscated original forwarded the example itself as
    # ``truncation=A_``; upstream passes truncation=False -- confirm.
    output["input_ids"] = tokenizer(A_["content"], truncation=False)["input_ids"]
    # Key name taken from the upstream pretokenization script -- confirm.
    output["ratio_char_token"] = len(A_["content"]) / len(output["input_ids"])
    return output
# NOTE(review): restored the real variable names (parser, args, tokenizer,
# t_start, ds); the obfuscated original bound every result to ``__lowercase``
# while the following lines read these names, raising NameError. The map
# callback is the ``lowercase`` function defined above (the original passed
# the undefined name ``tokenize``).
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="""train""")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
# Drop every raw-metadata column after tokenization to keep the pushed
# dataset small.
ds = ds.map(
    lowercase,
    num_proc=args.num_workers,
    remove_columns=[
        """repo_name""",
        """path""",
        """copies""",
        """size""",
        """content""",
        """license""",
        """hash""",
        """line_mean""",
        """line_max""",
        """alpha_frac""",
        """autogenerated""",
    ],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 40 |
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( TokenizerTesterMixin ,unittest.TestCase ):
    """Tokenization tests for LayoutLM (slow + fast tokenizers).

    NOTE(review): reconstructed from an obfuscated original in which every
    class attribute was bound to ``UpperCAmelCase`` and every method was
    named ``__snake_case`` (each rebinding clobbered the previous one), and
    ``self.vocab_file`` was read without ever being set. Attribute and
    method names are restored from the TokenizerTesterMixin contract and
    the body's own use sites (e.g. ``self.tokenizer_class``) -- confirm
    against the upstream test file.
    """

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        """Write a small WordPiece vocab file into the test's temp dir."""
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Instantiate a slow tokenizer from the temp vocab dir."""
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Sample (raw, detokenized) text pair used by the mixin."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        """WordPiece tokenization and id conversion against the toy vocab."""
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """Intentionally empty placeholder (kept from the original)."""
        pass
| 40 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class _UpperCAmelCase ( TaskTemplate ):
    """Dataset task template for automatic speech recognition.

    NOTE(review): reconstructed from an obfuscated original -- the decorator
    read the undefined name ``a`` (upstream: ``frozen=True``), the base
    class was the undefined ``a`` (this module imports ``TaskTemplate``),
    and every field/method was bound to ``a__``/``__lowerCAmelCase``. Field
    and method names are restored from the body's use sites
    (``self.audio_column``, ``self.input_schema``) and the upstream
    contract -- confirm against datasets' task templates.
    """

    # `task` survives asdict() even at its default so consumers can dispatch on it.
    task: str = field(default='''automatic-speech-recognition''' ,metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema: ClassVar[Features] = Features({'''audio''': Audio()} )
    label_schema: ClassVar[Features] = Features({'''transcription''': Value('''string''' )} )
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """Return a copy of this template whose input schema uses the dataset's own Audio feature."""
        if self.audio_column not in features:
            raise ValueError(f'Column {self.audio_column} is not present in features.' )
        if not isinstance(features[self.audio_column] , Audio ):
            raise ValueError(f'Column {self.audio_column} is not an Audio type.' )
        task_template = copy.deepcopy(self )
        input_schema = self.input_schema.copy()
        input_schema['''audio'''] = features[self.audio_column]
        # frozen dataclass: write through __dict__ to swap in the new schema.
        task_template.__dict__['''input_schema'''] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names to the canonical audio/transcription keys."""
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 68 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy import structure for the efficientnet subpackage.
# Fix: the obfuscated original bound this dict (and the conditional symbol
# lists) to throwaway names while ``_LazyModule`` below read the undefined
# name ``_import_structure``; the dict keys are grounded by the submodule
# names imported in the TYPE_CHECKING branch.
_import_structure = {
    'configuration_efficientnet': [
        'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EfficientNetConfig',
        'EfficientNetOnnxConfig',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision backend present: expose the image processor lazily.
    _import_structure['image_processing_efficientnet'] = ['EfficientNetImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch present: expose the modeling symbols lazily.
    _import_structure['modeling_efficientnet'] = [
        'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EfficientNetForImageClassification',
        'EfficientNetModel',
        'EfficientNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_efficientnet import (
        EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EfficientNetConfig,
        EfficientNetOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientnet import EfficientNetImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientnet import (
            EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientNetForImageClassification,
            EfficientNetModel,
            EfficientNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules import on
    # first attribute access. NOTE(review): the original bound the proxy to
    # a throwaway name; upstream assigns sys.modules[__name__] -- confirm.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 68 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 9 |
"""simple docstring"""
from __future__ import annotations
# Fix: every constant below was bound to ``__A``, so each assignment
# clobbered the previous one while the functions in this module read the
# upstream names (``abc``, ``reflector``, ``rotora``, ...). The alias names
# ``RotorPositionT``/``RotorSelectionT`` are grounded by the function
# annotations below; the extra-rotor names follow the upstream convention.
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""

# -------------------------- default selection --------------------------
# rotors --------------------------
rotora = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
rotorb = """FOBHMDKEXQNRAULPGSJVTYICZW"""
rotorc = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
reflector = {
    """A""": """N""",
    """N""": """A""",
    """B""": """O""",
    """O""": """B""",
    """C""": """P""",
    """P""": """C""",
    """D""": """Q""",
    """Q""": """D""",
    """E""": """R""",
    """R""": """E""",
    """F""": """S""",
    """S""": """F""",
    """G""": """T""",
    """T""": """G""",
    """H""": """U""",
    """U""": """H""",
    """I""": """V""",
    """V""": """I""",
    """J""": """W""",
    """W""": """J""",
    """K""": """X""",
    """X""": """K""",
    """L""": """Y""",
    """Y""": """L""",
    """M""": """Z""",
    """Z""": """M""",
}

# -------------------------- extra rotors --------------------------
rotord = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
rotore = """SGLCPQWZHKXAREONTFBVIYJUDM"""
rotorf = """HVSICLTYKQUBXDWAJZOMFGPREN"""
rotorg = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
rotorh = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
rotori = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def __A(rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Validate rotor positions/selection and parse the plugboard string.

    Fixes the original signature, which declared ``_SCREAMING_SNAKE_CASE``
    three times (a SyntaxError), and the position unpacking, which bound all
    three rotor positions to one name and then read the undefined
    ``rotorposa``. The first error message also gained its missing ``)``.

    Raises:
        Exception: fewer than 3 unique rotors were supplied.
        ValueError: a rotor position is outside 1..26.
    """
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)
    # Checks if rotor positions are valid
    rotorposa, rotorposb, rotorposc = rotpos
    if not 0 < rotorposa <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorposa})"
        raise ValueError(msg)
    if not 0 < rotorposb <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorposb})"
        raise ValueError(msg)
    if not 0 < rotorposc <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorposc})"
        raise ValueError(msg)
    # Validates string and returns dict
    # NOTE(review): ``abc`` and ``_plugboard`` are upstream module names; in
    # this file the corresponding definitions were rebound to ``__A``, so
    # these lookups rely on those names being restored -- confirm.
    pbdict = _plugboard(pb)
    return rotpos, rotsel, pbdict
def __A(pbstring: str) -> dict[str, str]:
    """Parse an Enigma plugboard string like "AB CD" into a symmetric map.

    Fixes the obfuscated original, which discarded the result of
    ``pbstring.replace(' ', '')`` (so spaces broke the parity and duplicate
    checks) and read the undefined locals ``tmppbl``/``pb``. The space
    stripping now happens BEFORE the parity check so "AB CD" validates.

    Raises:
        TypeError: ``pbstring`` is not a str.
        Exception: odd symbol count, unknown symbol, or duplicate symbol.
    """
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    pbstring = pbstring.replace(" ", "")
    if len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    if pbstring == "":
        return {}
    # Checks if all characters are unique
    # NOTE(review): ``abc`` is the upstream alphabet constant; in this file
    # it was rebound to ``__A`` and needs restoring -- confirm.
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        if i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        tmppbl.add(i)
    del tmppbl
    # Created the dictionary (each pair maps both ways)
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb
def __A(text: str, rotor_position: RotorPositionT, rotor_selection: RotorSelectionT = None, plugb: str = "") -> str:
    """Encipher/decipher ``text`` with an Enigma-style machine.

    Running the output back through with identical settings recovers the
    input (the reflector makes the transform an involution).

    Fixes the original signature, which declared ``_SCREAMING_SNAKE_CASE``
    four times (a SyntaxError), and the body's collapsed local names.
    NOTE(review): the lookups of ``abc``/``reflector``/``rotora`` and the
    validator call are upstream module names; in this file those
    definitions were rebound to ``__A`` and need restoring -- confirm. The
    rotor-selection default is resolved lazily so the module still imports
    when ``rotora`` is missing.
    """
    if rotor_selection is None:
        rotor_selection = (rotora, rotora, rotora)
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())
    rotorposa, rotorposb, rotorposc = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    # positions are 1-based for the caller; work 0-based internally
    rotorposa -= 1
    rotorposb -= 1
    rotorposc -= 1
    result = []
    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # rotor ra --------------------------
            index = abc.index(symbol) + rotorposa
            symbol = rotor_a[index % len(abc)]
            # rotor rb --------------------------
            index = abc.index(symbol) + rotorposb
            symbol = rotor_b[index % len(abc)]
            # rotor rc --------------------------
            index = abc.index(symbol) + rotorposc
            symbol = rotor_c[index % len(abc)]
            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]
            # 2nd rotors (inverse pass)
            symbol = abc[rotor_c.index(symbol) - rotorposc]
            symbol = abc[rotor_b.index(symbol) - rotorposb]
            symbol = abc[rotor_a.index(symbol) - rotorposa]
            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]
            # moves/resets rotor positions (odometer-style carry)
            rotorposa += 1
            if rotorposa >= len(abc):
                rotorposa = 0
                rotorposb += 1
            if rotorposb >= len(abc):
                rotorposb = 0
                rotorposc += 1
            if rotorposc >= len(abc):
                rotorposc = 0
        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError('Invalid symbol('+repr(symbol)+')')
        result.append(symbol)
    return "".join(result)
if __name__ == "__main__":
    # Demo: encrypt, then decrypt with identical settings (Enigma is an
    # involution given the same rotor positions/selection and plugboard).
    # NOTE(review): not runnable as written -- every value below is bound to
    # ``__A`` (each assignment clobbering the previous one) while the calls
    # read the names ``message``, ``rotor_pos``, ``rotor_sel``, ``pb``,
    # ``en``, ``enigma`` and ``rotora``, none of which are defined in this
    # file.
    __A = """This is my Python script that emulates the Enigma machine from WWII."""
    __A = (1, 1, 1)
    __A = """pictures"""
    __A = (rotora, rotora, rotora)
    __A = enigma(message, rotor_pos, rotor_sel, pb)
    print("""Encrypted message:""", en)
    print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 293 | 0 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class _lowerCamelCase ( _lowercase ):
def snake_case_ (self ) -> Any:
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = 8
# DPR tok
UpperCamelCase = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCamelCase = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(__A , exist_ok=__A )
UpperCamelCase = os.path.join(__A , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
UpperCamelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
UpperCamelCase = dict(zip(__A , range(len(__A ) ) ) )
UpperCamelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
UpperCamelCase = {"unk_token": "<unk>"}
UpperCamelCase = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(__A , exist_ok=__A )
UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase = os.path.join(__A , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
def snake_case_ (self ) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def snake_case_ (self ) -> DPRContextEncoderTokenizer:
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def snake_case_ (self ) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def snake_case_ (self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def snake_case_ (self ) -> Dict:
UpperCamelCase = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def snake_case_ (self ) -> Union[str, Any]:
UpperCamelCase = self.get_dummy_dataset()
UpperCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
UpperCamelCase = dataset
UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def snake_case_ (self , __a ) -> Any:
UpperCamelCase = self.get_dummy_dataset()
UpperCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
UpperCamelCase = os.path.join(self.tmpdirname , "dataset" )
UpperCamelCase = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __A ) , )
return retriever
def snake_case_ (self ) -> Dict:
UpperCamelCase = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCamelCase = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
UpperCamelCase = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
UpperCamelCase = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(__A , open(__A , "wb" ) )
UpperCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
UpperCamelCase = RagRetriever(
__A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = 1
UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase , UpperCamelCase , UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , __A )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case_ (self ) -> Optional[Any]:
UpperCamelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
UpperCamelCase = self.get_dummy_dataset()
retriever.save_pretrained(__A )
UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def snake_case_ (self ) -> Any:
UpperCamelCase = 1
UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase , UpperCamelCase , UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , __A )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = 1
UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase , UpperCamelCase , UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ) , __A )
self.assertEqual(doc_dicts[0]["id"][0] , "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0] , "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def snake_case_ (self ) -> Any:
UpperCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__A )
UpperCamelCase = RagRetriever.from_pretrained(__A )
self.assertIsInstance(__A , __A )
UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase = retriever.retrieve(__A , n_docs=1 )
self.assertTrue(out is not None )
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = 1
UpperCamelCase = self.get_dummy_legacy_index_retriever()
UpperCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCamelCase , UpperCamelCase , UpperCamelCase = retriever.retrieve(__A , n_docs=__A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ) , __A )
self.assertEqual(doc_dicts[0]["text"][0] , "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0] , "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def test_legacy_hf_index_retriever_save_and_from_pretrained(self) -> Optional[int]:
    """Round-trip a legacy-index retriever through save_pretrained/from_pretrained."""
    retriever = self.get_dummy_legacy_index_retriever()
    with tempfile.TemporaryDirectory() as tmp_dirname:
        retriever.save_pretrained(tmp_dirname)
        retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)],
            dtype=np.float32,  # np.floataa does not exist; float32 is the intended dtype
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
def test_hf_index_retriever_call(self) -> Union[str, Any]:
    """Call the retriever end-to-end and check list vs. torch.Tensor outputs."""
    import torch

    n_docs = 1
    retriever = self.get_dummy_canonical_hf_index_retriever()
    question_input_ids = [[5, 7], [10, 11]]
    hidden_states = np.array(
        [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)],
        dtype=np.float32,  # np.floataa does not exist; float32 is the intended dtype
    )
    out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
    context_input_ids, context_attention_mask, retrieved_doc_embeds = (
        out["context_input_ids"],
        out["context_attention_mask"],
        out["retrieved_doc_embeds"],
    )
    self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
    # Without return_tensors the ids/masks come back as plain Python lists.
    self.assertIsInstance(context_input_ids, list)
    self.assertIsInstance(context_attention_mask, list)
    self.assertIsInstance(retrieved_doc_embeds, np.ndarray)
    out = retriever(
        question_input_ids,
        hidden_states,
        prefix=retriever.config.generator.prefix,
        n_docs=n_docs,
        return_tensors="pt",
    )
    context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
        out["context_input_ids"],
        out["context_attention_mask"],
        out["retrieved_doc_embeds"],
        out["doc_ids"],
    )
    self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
    # With return_tensors="pt" everything is a torch.Tensor.
    self.assertIsInstance(context_input_ids, torch.Tensor)
    self.assertIsInstance(context_attention_mask, torch.Tensor)
    self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
def test_custom_hf_index_end2end_retriever_call(self) -> Dict:
    """Check that a retriever with a ctx-encoder tokenizer also returns tokenized docs."""
    context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
    n_docs = 1
    retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
    retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)
    question_input_ids = [[5, 7], [10, 11]]
    hidden_states = np.array(
        [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)],
        dtype=np.float32,  # np.floataa does not exist; float32 is the intended dtype
    )
    out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
    self.assertEqual(
        len(out), 6
    )  # check whether the retriever output consist of 6 attributes including tokenized docs
    self.assertEqual(
        all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
    )  # check for doc token related keys in dictionary.
| 359 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the timesformer subpackage.  The mangled source assigned
# the dict (and then the model list) to a throwaway name and passed an undefined
# `_import_structure` to _LazyModule, so importing this module raised NameError.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Only expose the modeling classes when torch is installed; the original
    # overwrote the whole dict with this list instead of adding a key.
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy in place of this module; assigning it to a local
    # name (as the original did) leaves the real module in place and defeats
    # lazy loading entirely.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 244 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for the timesformer subpackage (second copy of the same
# init).  As above: the mangled source assigned the dict and the model list to a
# throwaway `a__` and passed an undefined `_import_structure` to _LazyModule.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the torch models under their own key instead of clobbering the dict.
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy so attribute access triggers imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 53 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
# Strings that mark the end of a generated function body (start of a new
# class/def/comment/decorator/top-level statement).  Restored to the name the
# rest of the script expects (the mangled source used a throwaway name).
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the HumanEval prompts.

    Multiple copies of the same prompt are yielded sequentially so that each task is
    generated `n_copies` times.  The mangled source named this class with a throwaway
    identifier while the script below instantiates it as `TokenizedDataset`, and its
    base class name was undefined; both are restored here.
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1) -> Dict:
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self) -> Optional[Any]:
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that stops generation once every sequence in the
    batch contains one of the end-of-function strings.

    Restored from the mangled source: the class was given a throwaway name while
    the script below references `EndOfFunctionCriteria`, and its base class name
    was undefined (should be `StoppingCriteria`).
    """

    def __init__(self, start_length, eof_strings, tokenizer) -> Optional[Any]:
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs) -> Dict:
        """Return True when all generated continuations contain an EOF string."""
        # Only look at the newly generated part, after the prompt.
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string, eof_strings=("\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif")):
    """Remove the last (usually truncated) block of generated code.

    Splits on the end-of-function markers with a capturing group (so the markers
    are kept in the result) and drops the final marker plus everything after it.
    The mangled source defined this under a throwaway name (the script calls it
    as `remove_last_block`) and reused one parameter as both the string and the
    marker list; the markers are now a keyword parameter defaulting to the
    script's EOF strings, keeping the one-argument call sites working.
    """
    string_list = re.split("(%s)" % "|".join(eof_strings), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` completions per HumanEval task and group them by task id.

    The mangled source defined this under a throwaway name while `main` calls it as
    `complete_code`, and collapsed every local into one identifier; names are
    restored from the canonical transformers HumanEval script.
    """
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # NOTE(review): the original script updates the stopping criterion with the
            # prompt length here; the mangled line only computed it — confirm intent.
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    """Run HumanEval generation + code_eval scoring end-to-end.

    Restored from the mangled source, which defined this under a throwaway name
    (the entry guard calls `main`) and collapsed every assignment — including the
    `os.environ` writes — into throwaway locals.
    """
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings.  The EOF strings are inlined here because the module-level
    # constant's name was mangled in this file.
    eof_strings = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, eof_strings, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(references=references, predictions=code_gens, num_workers=args.num_workers)
        print(f"Results: {pass_at_k}")
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 5 | 0 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()

# Registry mapping each supported model type to its config class, TF 2.0 class(es),
# PyTorch class(es) and pretrained archive map(s).  The mangled source assigned this
# dict to a throwaway name while the conversion functions below look it up as
# MODEL_CLASSES, which raised NameError at the first call.
MODEL_CLASSES = {
    "bart": (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "dpr": (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "gpt2": (
        GPTaConfig,
        TFGPTaLMHeadModel,
        GPTaLMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "layoutlm": (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert": (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert-visual-feature-encoder": (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        TaConfig,
        TFTaForConditionalGeneration,
        TaForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "wav2vec2": (
        WavaVecaConfig,
        TFWavaVecaModel,
        WavaVecaModel,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
) -> Union[str, Any]:
    """Convert a single PyTorch checkpoint to a TF 2.0 `.h5` weight file.

    Restored from the mangled source: the function was defined under a throwaway
    name while the batch converter below calls `convert_pt_checkpoint_to_tf`,
    and every local was collapsed into one identifier.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")
    # NOTE(review): some MODEL_CLASSES entries hold more than four elements (e.g. "dpr");
    # those would fail this unpack — confirm against the intended registry layout.
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]
    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)
    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)
    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network
        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )
        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)
        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"
    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
) -> List[Any]:
    """Convert every registered checkpoint (or one model type) from PyTorch to TF 2.0.

    Restored from the mangled source: the function was defined under a throwaway
    name while the __main__ block calls `convert_all_pt_checkpoints_to_tf`, and
    the collapsed locals lost the config-file/model-file distinction.
    """
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]
    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")
        # NOTE(review): a five-way unpack, while convert_pt_checkpoint_to_tf unpacks
        # four — confirm the registry layout these two were written against.
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]
        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path
        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)
            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name
            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name
            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"
            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
__lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
__lowerCamelCase : int = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 140 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the SentencePiece test fixture; the tokenizer tests build tokenizers from it.
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Albert tokenizer tests.

    Restored from the mangled source: the mixin base-class name was undefined,
    the mixin hook attributes had throwaway names, and every method shared one
    mangled name (so only the last definition would have survived and no test
    would be discovered by unittest).
    """

    # Hook attributes read by TokenizerTesterMixin.
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing.  The fixture path is computed
        # inline because the module-level constant's name was mangled in this file.
        tokenizer = AlbertTokenizer(get_tests_dir("fixtures/spiece.model"))
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(get_tests_dir("fixtures/spiece.model"), keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(get_tests_dir("fixtures/spiece.model"))
        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 140 | 1 |
"""simple docstring"""
import numpy as np
def lowercase__(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Exponential Linear Unit (ELU) activation, applied element-wise.

    Original signature declared *both* parameters as ``snake_case_`` — a
    duplicate argument name, which is a SyntaxError; renamed to ``vector``
    and ``alpha`` to match the formula they implement.

    Args:
        vector: Input array of any shape.
        alpha: Scale for the negative branch.

    Returns:
        ``x`` where ``x > 0``, otherwise ``alpha * (exp(x) - 1)``.
    """
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
    # Run the module's doctests when executed directly as a script.
    import doctest

    doctest.testmod()
| 332 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration tests checking XLM-RoBERTa checkpoints against reference
    hidden-state values produced by the original fairseq implementation.

    NOTE(review): the original defined both methods under the same name
    (``__UpperCAmelCase``), so the second definition shadowed the first and
    neither matched unittest's ``test_*`` discovery pattern.  They are renamed
    so the runner actually executes them.  The original also assigned locals
    to ``lowerCAmelCase`` but referenced the undefined name
    ``UpperCAmelCase__`` — a guaranteed NameError — fixed by using real
    variable names throughout.
    """

    @slow
    def test_xlm_roberta_base(self):
        """xlm-roberta-base forward pass matches the reference slice."""
        model = XLMRobertaModel.from_pretrained('xlm-roberta-base')
        # "The dog is cute and lives in the garden house" (XLM-R SentencePiece ids).
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        expected_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_slice = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        # Reference recipe for the expected values:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_slice, atol=1E-3))

    @slow
    def test_xlm_roberta_large(self):
        """xlm-roberta-large forward pass matches the reference slice."""
        model = XLMRobertaModel.from_pretrained('xlm-roberta-large')
        # "The dog is cute and lives in the garden house" (XLM-R SentencePiece ids).
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        expected_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_slice = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]])
        # Reference recipe for the expected values:
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)['last_hidden_state'].detach()
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice of the last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_slice, atol=1E-3))
| 4 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import machinery for the AltCLIP sub-package. `_import_structure` maps
# each submodule to the public names it exports; torch-backed modules are
# registered only when torch is actually installed.
#
# NOTE(review): the original bound this dict to a throwaway name
# (`lowercase`) while passing the *undefined* `_import_structure` to
# `_LazyModule` (NameError), dropped the modeling name list into another
# throwaway variable, and assigned the `_LazyModule` to a variable instead of
# installing it via `sys.modules[__name__]` — all fixed below.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is unavailable: the modeling module is simply not exported.
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    # ...while at runtime the module is replaced by a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 225 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import machinery for the CLIP sub-package. `_import_structure` maps
# each submodule to the public names it exports; backend-specific modules
# (tokenizers / vision / torch / tf / flax) are registered only when their
# dependency is actually installed.
#
# NOTE(review): the original bound this dict and every backend name list to
# throwaway `lowercase` variables while passing the *undefined*
# `_import_structure` to `_LazyModule` (NameError), and assigned the
# `_LazyModule` to a variable instead of installing it via
# `sys.modules[__name__]` — all fixed below.
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    # ...while at runtime the module is replaced by a lazy proxy that imports
    # submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 225 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.